Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c               |  79
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  |  50
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      |  54
-rw-r--r--   drivers/cpufreq/cpufreq_stats.c         |   6
4 files changed, 91 insertions, 98 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2f5801adc739..622013fb7890 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -678,7 +678,7 @@ static struct kobj_type ktype_cpufreq = {
  */
 static int cpufreq_add_dev_policy(unsigned int cpu,
 				  struct cpufreq_policy *policy,
-				  struct sys_device *sys_dev)
+				  struct device *dev)
 {
 	int ret = 0;
 #ifdef CONFIG_SMP
@@ -727,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 			pr_debug("CPU already managed, adding link\n");
-			ret = sysfs_create_link(&sys_dev->kobj,
+			ret = sysfs_create_link(&dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
 			if (ret)
@@ -760,7 +760,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
 
 	for_each_cpu(j, policy->cpus) {
 		struct cpufreq_policy *managed_policy;
-		struct sys_device *cpu_sys_dev;
+		struct device *cpu_dev;
 
 		if (j == cpu)
 			continue;
@@ -769,8 +769,8 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
 
 		pr_debug("CPU %u already managed, adding link\n", j);
 		managed_policy = cpufreq_cpu_get(cpu);
-		cpu_sys_dev = get_cpu_sysdev(j);
-		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+		cpu_dev = get_cpu_device(j);
+		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 					"cpufreq");
 		if (ret) {
 			cpufreq_cpu_put(managed_policy);
@@ -782,7 +782,7 @@
 
 static int cpufreq_add_dev_interface(unsigned int cpu,
 				     struct cpufreq_policy *policy,
-				     struct sys_device *sys_dev)
+				     struct device *dev)
 {
 	struct cpufreq_policy new_policy;
 	struct freq_attr **drv_attr;
@@ -792,7 +792,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
 
 	/* prepare interface data */
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-				   &sys_dev->kobj, "cpufreq");
+				   &dev->kobj, "cpufreq");
 	if (ret)
 		return ret;
 
@@ -865,9 +865,9 @@ err_out_kobj_put:
  * with with cpu hotplugging and all hell will break loose. Tried to clean this
  * mess up, but more thorough testing is needed. - Mathieu
  */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	int ret = 0, found = 0;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
@@ -946,7 +946,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+	ret = cpufreq_add_dev_policy(cpu, policy, dev);
 	if (ret) {
 		if (ret > 0)
 			/* This is a managed cpu, symlink created,
@@ -955,7 +955,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			goto err_unlock_policy;
 	}
 
-	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+	ret = cpufreq_add_dev_interface(cpu, policy, dev);
 	if (ret)
 		goto err_out_unregister;
 
@@ -998,15 +998,15 @@ module_out:
  * Caller should already have policy_rwsem in write mode for this CPU.
  * This routine frees the rwsem before returning.
  */
-static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	unsigned long flags;
 	struct cpufreq_policy *data;
 	struct kobject *kobj;
 	struct completion *cmp;
 #ifdef CONFIG_SMP
-	struct sys_device *cpu_sys_dev;
+	struct device *cpu_dev;
 	unsigned int j;
 #endif
 
@@ -1031,7 +1031,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 		pr_debug("removing link\n");
 		cpumask_clear_cpu(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		kobj = &sys_dev->kobj;
+		kobj = &dev->kobj;
 		cpufreq_cpu_put(data);
 		unlock_policy_rwsem_write(cpu);
 		sysfs_remove_link(kobj, "cpufreq");
@@ -1070,8 +1070,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 		strncpy(per_cpu(cpufreq_cpu_governor, j),
 			data->governor->name, CPUFREQ_NAME_LEN);
 #endif
-		cpu_sys_dev = get_cpu_sysdev(j);
-		kobj = &cpu_sys_dev->kobj;
+		cpu_dev = get_cpu_device(j);
+		kobj = &cpu_dev->kobj;
 		unlock_policy_rwsem_write(cpu);
 		sysfs_remove_link(kobj, "cpufreq");
 		lock_policy_rwsem_write(cpu);
@@ -1111,11 +1111,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (unlikely(cpumask_weight(data->cpus) > 1)) {
 		/* first sibling now owns the new sysfs dir */
 		cpumask_clear_cpu(cpu, data->cpus);
-		cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
+		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
 
 		/* finally remove our own symlink */
 		lock_policy_rwsem_write(cpu);
-		__cpufreq_remove_dev(sys_dev);
+		__cpufreq_remove_dev(dev, sif);
 	}
 #endif
 
@@ -1127,9 +1127,9 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 }
 
 
-static int cpufreq_remove_dev(struct sys_device *sys_dev)
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = sys_dev->id;
+	unsigned int cpu = dev->id;
 	int retval;
 
 	if (cpu_is_offline(cpu))
@@ -1138,7 +1138,7 @@ static int cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (unlikely(lock_policy_rwsem_write(cpu)))
 		BUG();
 
-	retval = __cpufreq_remove_dev(sys_dev);
+	retval = __cpufreq_remove_dev(dev, sif);
 	return retval;
 }
 
@@ -1270,9 +1270,11 @@ out:
 }
 EXPORT_SYMBOL(cpufreq_get);
 
-static struct sysdev_driver cpufreq_sysdev_driver = {
-	.add		= cpufreq_add_dev,
-	.remove		= cpufreq_remove_dev,
+static struct subsys_interface cpufreq_interface = {
+	.name		= "cpufreq",
+	.subsys		= &cpu_subsys,
+	.add_dev	= cpufreq_add_dev,
+	.remove_dev	= cpufreq_remove_dev,
 };
 
 
@@ -1764,25 +1766,25 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
-	if (sys_dev) {
+	dev = get_cpu_device(cpu);
+	if (dev) {
 		switch (action) {
 		case CPU_ONLINE:
 		case CPU_ONLINE_FROZEN:
-			cpufreq_add_dev(sys_dev);
+			cpufreq_add_dev(dev, NULL);
 			break;
 		case CPU_DOWN_PREPARE:
 		case CPU_DOWN_PREPARE_FROZEN:
 			if (unlikely(lock_policy_rwsem_write(cpu)))
 				BUG();
 
-			__cpufreq_remove_dev(sys_dev);
+			__cpufreq_remove_dev(dev, NULL);
 			break;
 		case CPU_DOWN_FAILED:
 		case CPU_DOWN_FAILED_FROZEN:
-			cpufreq_add_dev(sys_dev);
+			cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -1829,8 +1831,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	cpufreq_driver = driver_data;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	ret = sysdev_driver_register(&cpu_sysdev_class,
-				     &cpufreq_sysdev_driver);
+	ret = subsys_interface_register(&cpufreq_interface);
 	if (ret)
 		goto err_null_driver;
 
@@ -1849,7 +1850,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		if (ret) {
 			pr_debug("no CPU initialized for driver %s\n",
 							driver_data->name);
-			goto err_sysdev_unreg;
+			goto err_if_unreg;
 		}
 	}
 
@@ -1857,9 +1858,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	pr_debug("driver %s up and running\n", driver_data->name);
 
 	return 0;
-err_sysdev_unreg:
-	sysdev_driver_unregister(&cpu_sysdev_class,
-			&cpufreq_sysdev_driver);
+err_if_unreg:
+	subsys_interface_unregister(&cpufreq_interface);
 err_null_driver:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
@@ -1886,7 +1886,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 
 	pr_debug("unregistering driver %s\n", driver->name);
 
-	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+	subsys_interface_unregister(&cpufreq_interface);
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1906,8 +1906,7 @@ static int __init cpufreq_core_init(void)
 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
 	}
 
-	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
-						&cpu_sysdev_class.kset.kobj);
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);
 
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..235a340e81f2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
 	.freq_step = 5,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-						  cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 53ad4c780744..c3e0652520a1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-						  cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 	}
 	return count;
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
-				j_dbs_info->prev_cpu_iowait);
+		iowait_time = (unsigned int)
+			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
 		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->cpu = cpu;
 		this_dbs_info->rate_mult = 1;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a91e848..b40ee1403be9 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -11,7 +11,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/sysfs.h>
 #include <linux/cpufreq.h>
@@ -61,9 +60,8 @@ static int cpufreq_stats_update(unsigned int cpu)
 	spin_lock(&cpufreq_stats_lock);
 	stat = per_cpu(cpufreq_stats_table, cpu);
 	if (stat->time_in_state)
-		stat->time_in_state[stat->last_index] =
-			cputime64_add(stat->time_in_state[stat->last_index],
-			cputime_sub(cur_time, stat->last_time));
+		stat->time_in_state[stat->last_index] +=
+			cur_time - stat->last_time;
 	stat->last_time = cur_time;
 	spin_unlock(&cpufreq_stats_lock);
 	return 0;
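
For reference, a minimal sketch of what a client of the new interface looks like after this conversion. It is illustrative only and not part of the patch: the example_* names are hypothetical, while struct subsys_interface, cpu_subsys, subsys_interface_register()/subsys_interface_unregister() and the add_dev/remove_dev callback signatures are the ones the hunks above switch the cpufreq core to.

/*
 * Hypothetical example module (not from the patch above): registers a
 * subsys_interface with the cpu subsystem, replacing the old
 * sysdev_driver_register(&cpu_sysdev_class, ...) scheme.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/cpu.h>

static int example_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* dev->id is the CPU number, just as cpufreq_add_dev() uses it */
	pr_info("example: cpu%u added\n", dev->id);
	return 0;
}

static int example_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	pr_info("example: cpu%u removed\n", dev->id);
	return 0;
}

static struct subsys_interface example_interface = {
	.name		= "example",
	.subsys		= &cpu_subsys,
	.add_dev	= example_add_dev,
	.remove_dev	= example_remove_dev,
};

static int __init example_init(void)
{
	/* add_dev() is also called for every CPU device already registered */
	return subsys_interface_register(&example_interface);
}

static void __exit example_exit(void)
{
	subsys_interface_unregister(&example_interface);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The callbacks receive a plain struct device whose ->id is the CPU number, which is why the converted cpufreq_add_dev()/cpufreq_remove_dev() above read dev->id where they previously read sys_dev->id.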