author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-02-24 13:25:14 +0100
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-02-27 15:07:38 +0100
commit	6dbf5cea05a7098a69f294c96b6d76f08562cae5 (patch)
tree	b82971769598b518c88773bbb88874875a91aa62
parent	CPU / PM: expose pm_qos_resume_latency for CPUs (diff)
cpuidle: menu: Avoid taking spinlock for accessing QoS values
After commit 9908859acaa9 (cpuidle/menu: add per CPU PM QoS resume latency consideration), the cpuidle menu governor calls dev_pm_qos_read_value() on CPU devices to read their current resume latency QoS constraint values.

That function takes a spinlock to prevent the device's power.qos pointer from becoming NULL during the access, which is a problem for the RT patchset, where spinlocks are converted into mutexes and the idle loop stops working as a result.

However, it is not even necessary for the menu governor to take that spinlock, because the power.qos pointer accessed under it cannot be modified during the access anyway.

For this reason, introduce a "raw" routine for accessing device QoS resume latency constraints without locking and use it in the menu governor.

Fixes: 9908859acaa9 (cpuidle/menu: add per CPU PM QoS resume latency consideration)
Acked-by: Alex Shi <alex.shi@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
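For context, the two read paths look roughly like this. The first function is a simplified sketch paraphrased from drivers/base/power/qos.c of this kernel era (not part of this patch); the second is the lockless accessor the patch adds:

    /*
     * Existing path: the value is read under the device's power.lock
     * spinlock, which is what hurts on RT where spinlocks become
     * sleeping locks. Approximate shape, shown for illustration only.
     */
    s32 dev_pm_qos_read_value(struct device *dev)
    {
            unsigned long flags;
            s32 ret;

            spin_lock_irqsave(&dev->power.lock, flags);
            ret = __dev_pm_qos_read_value(dev);
            spin_unlock_irqrestore(&dev->power.lock, flags);

            return ret;
    }

    /* New lockless accessor introduced below: same computation, no lock taken. */
    static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
    {
            return IS_ERR_OR_NULL(dev->power.qos) ?
                    0 : pm_qos_read_value(&dev->power.qos->resume_latency);
    }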
-rw-r--r--	drivers/base/power/qos.c	3
-rw-r--r--	drivers/cpuidle/governors/menu.c	2
-rw-r--r--	include/linux/pm_qos.h	7
3 files changed, 9 insertions, 3 deletions
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 58fcc758334e..73142d086240 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -108,8 +108,7 @@ s32 __dev_pm_qos_read_value(struct device *dev)
 {
 	lockdep_assert_held(&dev->power.lock);
 
-	return IS_ERR_OR_NULL(dev->power.qos) ?
-		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+	return dev_pm_qos_raw_read_value(dev);
 }
 
 /**
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 8d6d25c38c02..6d6f46e79d94 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -287,7 +287,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	unsigned int interactivity_req;
 	unsigned int expected_interval;
 	unsigned long nr_iowaiters, cpu_load;
-	int resume_latency = dev_pm_qos_read_value(device);
+	int resume_latency = dev_pm_qos_raw_read_value(device);
 
 	if (data->needs_update) {
 		menu_update(drv, dev);
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 0f65d36c2a75..5aba9f47899d 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -173,6 +173,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
 {
 	return dev->power.qos->flags_req->data.flr.flags;
 }
+
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+{
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+}
 #else
 static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
 						s32 mask)
@@ -237,6 +243,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
 static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
 #endif
 
 #endif
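Why skipping the lock is safe here: pm_qos_read_value() itself is just a plain read of the constraint's cached target value (roughly the following, paraphrased from kernel/power/qos.c of the same era, not part of this patch), so the spinlock only guarded the power.qos pointer dereference, and per the changelog that pointer cannot be modified during the access anyway:

    /* Approximate shape of the helper used by the raw accessor. */
    s32 pm_qos_read_value(struct pm_qos_constraints *c)
    {
            return c->target_value;
    }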