author    Ulf Hansson <ulf.hansson@linaro.org>  2024-05-27 16:25:51 +0200
committer Ulf Hansson <ulf.hansson@linaro.org>  2024-08-05 13:12:04 +0200
commit    d7bdb8e6aabe218fd980768a1486434e42761539
tree      a04ca728f0b95fa0a8e12fa92aacabebdf2e0cef /drivers/pmdomain
parent    pmdomain: amlogic: remove obsolete vpu domain driver
pmdomain: core: Enable s2idle for CPU PM domains on PREEMPT_RT
To allow a genpd provider for a CPU PM domain to enter a domain-idle-state
during s2idle on a PREEMPT_RT based configuration, we can't use a regular
spinlock, as those are turned into sleepable locks on PREEMPT_RT.

To address this problem, let's convert to using a raw spinlock, but only
for genpd providers that have the GENPD_FLAG_CPU_DOMAIN bit set. In this
way, the lock can still be acquired/released in atomic context, which is
needed in the idle-path on PREEMPT_RT.

Do note that the genpd power-on/off notifiers may also be fired during
s2idle, but these are already prepared for PREEMPT_RT as they are based on
the raw notifiers. However, consumers of them may need to adapt accordingly
to work properly on PREEMPT_RT.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Raghavendra Kakarla <quic_rkakarla@quicinc.com> # qcm6490 with PREEMPT_RT set
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20240527142557.321610-2-ulf.hansson@linaro.org
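For context, here is a minimal provider-side sketch (the my_* names are hypothetical; GENPD_FLAG_CPU_DOMAIN, GENPD_FLAG_IRQ_SAFE and pm_genpd_init() are the existing genpd API) of how a CPU PM domain provider would opt into the raw-spinlock path added in the diff below:

#include <linux/pm_domain.h>

/* Callbacks may run in atomic context during s2idle, so they must not sleep. */
static int my_cluster_power_off(struct generic_pm_domain *pd)
{
	/* Program the hardware to power down the CPU cluster (non-sleeping I/O only). */
	return 0;
}

static int my_cluster_power_on(struct generic_pm_domain *pd)
{
	/* Bring the CPU cluster back up (non-sleeping I/O only). */
	return 0;
}

static struct generic_pm_domain my_cluster_pd = {
	.name = "my-cpu-cluster",
	/* GENPD_FLAG_CPU_DOMAIN makes genpd_lock_init() pick the raw spinlock ops. */
	.flags = GENPD_FLAG_CPU_DOMAIN | GENPD_FLAG_IRQ_SAFE,
	.power_off = my_cluster_power_off,
	.power_on = my_cluster_power_on,
};

static int my_provider_register(void)
{
	/* The lock type is chosen at init time based on the flags above. */
	return pm_genpd_init(&my_cluster_pd, NULL, false);
}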
Diffstat (limited to 'drivers/pmdomain')
-rw-r--r--  drivers/pmdomain/core.c | 47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 7a61aa88c061..8c798a46ffec 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -117,6 +117,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
.unlock = genpd_unlock_spin,
};
+static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->raw_slock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+ genpd->raw_lock_flags = flags;
+}
+
+static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->raw_slock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
+ genpd->raw_lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->raw_slock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+ genpd->raw_lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->raw_slock)
+{
+ raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_raw_spin_ops = {
+ .lock = genpd_lock_raw_spin,
+ .lock_nested = genpd_lock_nested_raw_spin,
+ .lock_interruptible = genpd_lock_interruptible_raw_spin,
+ .unlock = genpd_unlock_raw_spin,
+};
+
#define genpd_lock(p) p->lock_ops->lock(p)
#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
@@ -2143,7 +2185,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
- if (genpd_is_irq_safe(genpd)) {
+ if (genpd_is_cpu_domain(genpd)) {
+ raw_spin_lock_init(&genpd->raw_slock);
+ genpd->lock_ops = &genpd_raw_spin_ops;
+ } else if (genpd_is_irq_safe(genpd)) {
spin_lock_init(&genpd->slock);
genpd->lock_ops = &genpd_spin_ops;
} else {
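As the commit message notes, the genpd power-on/off notifiers may also fire from this atomic s2idle path; they are already raw notifiers, but their consumers must avoid sleeping work in the callbacks on PREEMPT_RT. A hedged sketch of such a consumer (the my_* names are hypothetical; dev_pm_genpd_add_notifier() and the GENPD_NOTIFY_* actions are the existing genpd API):

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int my_pd_notifier_cb(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Save context using non-sleeping accessors only. */
		break;
	case GENPD_NOTIFY_ON:
		/* Restore context; again, nothing that may block. */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_pd_nb = {
	.notifier_call = my_pd_notifier_cb,
};

/* Register against a device attached to the CPU PM domain. */
int my_register_pd_notifier(struct device *dev)
{
	return dev_pm_genpd_add_notifier(dev, &my_pd_nb);
}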