Diffstat (limited to 'drivers/rtc/interface.c')
 drivers/rtc/interface.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+), 0 deletions(-)
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 672b192f8153..7cbdc9228dd5 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -17,9 +17,73 @@
#include <linux/log2.h>
#include <linux/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/rtc.h>
+
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_add_offset(struct rtc_device *rtc, struct rtc_time *tm)
+{
+ time64_t secs;
+
+ if (!rtc->offset_secs)
+ return;
+
+ secs = rtc_tm_to_time64(tm);
+
+ /*
+ * Time values read back from the RTC device always lie within the
+ * hardware's original valid range. Values that fall in the region
+ * where the expanded range overlaps the original range are already
+ * correct, so the offset must not be added to them.
+ */
+ if ((rtc->start_secs > rtc->range_min && secs >= rtc->start_secs) ||
+ (rtc->start_secs < rtc->range_min &&
+ secs <= (rtc->start_secs + rtc->range_max - rtc->range_min)))
+ return;
+
+ rtc_time64_to_tm(secs + rtc->offset_secs, tm);
+}
+
+static void rtc_subtract_offset(struct rtc_device *rtc, struct rtc_time *tm)
+{
+ time64_t secs;
+
+ if (!rtc->offset_secs)
+ return;
+
+ secs = rtc_tm_to_time64(tm);
+
+ /*
+ * If the time being set already lies within the valid range of the RTC
+ * hardware, there is no need to subtract the offset. Otherwise the
+ * offset is subtracted so that the value written to the hardware is
+ * valid.
+ */
+ if (secs >= rtc->range_min && secs <= rtc->range_max)
+ return;
+
+ rtc_time64_to_tm(secs - rtc->offset_secs, tm);
+}
+
+static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
+{
+ if (rtc->range_min != rtc->range_max) {
+ time64_t time = rtc_tm_to_time64(tm);
+ time64_t range_min = rtc->set_start_time ? rtc->start_secs :
+ rtc->range_min;
+ time64_t range_max = rtc->set_start_time ?
+ (rtc->start_secs + rtc->range_max - rtc->range_min) :
+ rtc->range_max;
+
+ if (time < range_min || time > range_max)
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
int err;
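
To make the offset handling above concrete, here is a small stand-alone sketch (not part of the patch) of the same arithmetic for the common case where start_secs lies above range_min. The concrete dates and the assumption that offset_secs is derived as start_secs - range_min are purely illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative hardware range: 1970..2038 */
	int64_t range_min  = 0;
	int64_t range_max  = 0x7fffffff;
	/* illustrative expanded window starting in 2010 */
	int64_t start_secs = 1262304000;
	/* assumed to be derived as start_secs - range_min at registration */
	int64_t offset_secs = start_secs - range_min;

	/* raw seconds value as read from the hardware counter */
	int64_t hw_secs = 100000;

	/*
	 * Same decision as rtc_add_offset() for the start_secs > range_min
	 * case (the start_secs < range_min branch is omitted here): values
	 * at or above start_secs sit in the overlap of the two ranges and
	 * are reported unchanged, everything else is shifted by the offset.
	 */
	if (!(start_secs > range_min && hw_secs >= start_secs))
		hw_secs += offset_secs;

	printf("time reported to the core: %lld\n", (long long)hw_secs);
	return 0;
}
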
@@ -36,6 +100,8 @@ static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
return err;
}
+ rtc_add_offset(rtc, tm);
+
err = rtc_valid_tm(tm);
if (err < 0)
dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
@@ -53,6 +119,8 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
err = __rtc_read_time(rtc, tm);
mutex_unlock(&rtc->ops_lock);
+
+ trace_rtc_read_time(rtc_tm_to_time64(tm), err);
return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);
@@ -65,6 +133,12 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
if (err != 0)
return err;
+ err = rtc_valid_range(rtc, tm);
+ if (err)
+ return err;
+
+ rtc_subtract_offset(rtc, tm);
+
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
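
With the range check in place, an out-of-range time is refused before it ever reaches the driver. A minimal user-space sketch, assuming the /dev/rtc0 device node, an arbitrary out-of-range date, and a driver that declares a limited range so that -ERANGE can actually be returned:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDWR);	/* device node assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* deliberately implausible date, e.g. year 2200 on a 32-bit RTC */
	memset(&tm, 0, sizeof(tm));
	tm.tm_year = 2200 - 1900;
	tm.tm_mon  = 0;
	tm.tm_mday = 1;

	if (ioctl(fd, RTC_SET_TIME, &tm) < 0 && errno == ERANGE)
		fprintf(stderr, "time outside the RTC's supported range\n");

	close(fd);
	return 0;
}
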
@@ -87,6 +161,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
mutex_unlock(&rtc->ops_lock);
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
+
+ trace_rtc_set_time(rtc_tm_to_time64(tm), err);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
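
The tracepoints created above (rtc_read_time, rtc_set_time, and friends) can be switched on from user space through tracefs. A sketch, assuming tracefs is mounted at the usual /sys/kernel/tracing location and that the events are grouped under an "rtc" directory:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* tracefs mount point and event group name assumed */
	const char *enable = "/sys/kernel/tracing/events/rtc/enable";
	int fd = open(enable, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/* emitted events can then be read from /sys/kernel/tracing/trace */
	return 0;
}
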
@@ -119,6 +195,8 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *al
}
mutex_unlock(&rtc->ops_lock);
+
+ trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
return err;
}
@@ -316,6 +394,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
mutex_unlock(&rtc->ops_lock);
+ trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
@@ -329,6 +408,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = rtc_valid_tm(&alarm->time);
if (err)
return err;
+
+ rtc_subtract_offset(rtc, &alarm->time);
scheduled = rtc_tm_to_time64(&alarm->time);
/* Make sure we're not setting alarms in the past */
@@ -352,6 +433,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
else
err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+ trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
return err;
}
@@ -363,6 +445,10 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (err != 0)
return err;
+ err = rtc_valid_range(rtc, &alarm->time);
+ if (err)
+ return err;
+
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
@@ -375,6 +461,8 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
mutex_unlock(&rtc->ops_lock);
+
+ rtc_add_offset(rtc, &alarm->time);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
@@ -406,6 +494,7 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
rtc->aie_timer.enabled = 1;
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+ trace_rtc_timer_enqueue(&rtc->aie_timer);
}
mutex_unlock(&rtc->ops_lock);
return err;
@@ -435,6 +524,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);
mutex_unlock(&rtc->ops_lock);
+
+ trace_rtc_alarm_irq_enable(enabled, err);
return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
@@ -709,6 +800,8 @@ retry:
rtc->pie_enabled = enabled;
}
spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+
+ trace_rtc_irq_set_state(enabled, err);
return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);
@@ -745,6 +838,8 @@ retry:
}
}
spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+
+ trace_rtc_irq_set_freq(freq, err);
return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
@@ -779,6 +874,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
}
timerqueue_add(&rtc->timerqueue, &timer->node);
+ trace_rtc_timer_enqueue(timer);
if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
@@ -790,6 +886,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
schedule_work(&rtc->irqwork);
} else if (err) {
timerqueue_del(&rtc->timerqueue, &timer->node);
+ trace_rtc_timer_dequeue(timer);
timer->enabled = 0;
return err;
}
@@ -803,6 +900,7 @@ static void rtc_alarm_disable(struct rtc_device *rtc)
return;
rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
+ trace_rtc_alarm_irq_enable(0, 0);
}
/**
@@ -821,6 +919,7 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
timerqueue_del(&rtc->timerqueue, &timer->node);
+ trace_rtc_timer_dequeue(timer);
timer->enabled = 0;
if (next == &timer->node) {
struct rtc_wkalrm alarm;
@@ -871,16 +970,19 @@ again:
/* expire timer */
timer = container_of(next, struct rtc_timer, node);
timerqueue_del(&rtc->timerqueue, &timer->node);
+ trace_rtc_timer_dequeue(timer);
timer->enabled = 0;
if (timer->task.func)
timer->task.func(timer->task.private_data);
+ trace_rtc_timer_fired(timer);
/* Re-add/fwd periodic timers */
if (ktime_to_ns(timer->period)) {
timer->node.expires = ktime_add(timer->node.expires,
timer->period);
timer->enabled = 1;
timerqueue_add(&rtc->timerqueue, &timer->node);
+ trace_rtc_timer_enqueue(timer);
}
}
@@ -902,6 +1004,7 @@ reprogram:
timer = container_of(next, struct rtc_timer, node);
timerqueue_del(&rtc->timerqueue, &timer->node);
+ trace_rtc_timer_dequeue(timer);
timer->enabled = 0;
dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
goto again;
@@ -992,6 +1095,8 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
mutex_lock(&rtc->ops_lock);
ret = rtc->ops->read_offset(rtc->dev.parent, offset);
mutex_unlock(&rtc->ops_lock);
+
+ trace_rtc_read_offset(*offset, ret);
return ret;
}
@@ -1025,5 +1130,7 @@ int rtc_set_offset(struct rtc_device *rtc, long offset)
mutex_lock(&rtc->ops_lock);
ret = rtc->ops->set_offset(rtc->dev.parent, offset);
mutex_unlock(&rtc->ops_lock);
+
+ trace_rtc_set_offset(offset, ret);
return ret;
}
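
The rtc_read_offset()/rtc_set_offset() paths traced above back the per-device calibration offset that is also exposed through sysfs. A small read-side sketch, assuming the rtc0 device name and the standard "offset" attribute:

#include <stdio.h>

int main(void)
{
	/* sysfs path assumed; the attribute is only meaningful when the
	 * driver implements read_offset/set_offset */
	FILE *f = fopen("/sys/class/rtc/rtc0/offset", "r");
	long offset;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &offset) == 1)
		printf("calibration offset: %ld\n", offset);
	fclose(f);
	return 0;
}
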