xref: /kernel/linux/linux-6.6/drivers/rtc/interface.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * RTC subsystem, interface functions
4 *
5 * Copyright (C) 2005 Tower Technologies
6 * Author: Alessandro Zummo <a.zummo@towertech.it>
7 *
8 * based on arch/arm/common/rtctime.c
9 */
10
11#include <linux/rtc.h>
12#include <linux/sched.h>
13#include <linux/module.h>
14#include <linux/log2.h>
15#include <linux/workqueue.h>
16
17#define CREATE_TRACE_POINTS
18#include <trace/events/rtc.h>
19
20static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
21static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
22
23static void rtc_add_offset(struct rtc_device *rtc, struct rtc_time *tm)
24{
25	time64_t secs;
26
27	if (!rtc->offset_secs)
28		return;
29
30	secs = rtc_tm_to_time64(tm);
31
32	/*
33	 * The time values read from the RTC device always lie within the RTC's
34	 * original valid range.  When such a value also falls inside the region
35	 * where the expanded range overlaps the original range, it must be
36	 * passed through unchanged, i.e. without adding the offset.
37	 */
38	if ((rtc->start_secs > rtc->range_min && secs >= rtc->start_secs) ||
39	    (rtc->start_secs < rtc->range_min &&
40	     secs <= (rtc->start_secs + rtc->range_max - rtc->range_min)))
41		return;
42
43	rtc_time64_to_tm(secs + rtc->offset_secs, tm);
44}
45
46static void rtc_subtract_offset(struct rtc_device *rtc, struct rtc_time *tm)
47{
48	time64_t secs;
49
50	if (!rtc->offset_secs)
51		return;
52
53	secs = rtc_tm_to_time64(tm);
54
55	/*
56	 * If the time value being set lies within the valid range of the RTC
57	 * hardware, there is no need to subtract the offset before writing it
58	 * to the device.  Otherwise, subtract the offset so that the resulting
59	 * value is valid for the RTC hardware.
60	 */
61	if (secs >= rtc->range_min && secs <= rtc->range_max)
62		return;
63
64	rtc_time64_to_tm(secs - rtc->offset_secs, tm);
65}
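
/*
 * A worked sketch of the offset handling above, with illustrative numbers:
 * assume hardware whose native range (range_min..range_max) covers
 * 1970..2069 and a platform that declares start_secs as 2000-01-01, with
 * offset_secs assumed here to equal the width of the hardware range
 * (roughly 100 years).  A raw reading of 1970-06-01 lies below start_secs,
 * so rtc_add_offset() reports it as roughly 2070-06-01; a raw reading of
 * 2010-06-01 already falls in the overlap between the expanded and
 * original ranges and is passed through unchanged.  rtc_subtract_offset()
 * applies the inverse mapping before a time is written back to the
 * hardware.
 */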
66
67static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
68{
69	if (rtc->range_min != rtc->range_max) {
70		time64_t time = rtc_tm_to_time64(tm);
71		time64_t range_min = rtc->set_start_time ? rtc->start_secs :
72			rtc->range_min;
73		timeu64_t range_max = rtc->set_start_time ?
74			(rtc->start_secs + rtc->range_max - rtc->range_min) :
75			rtc->range_max;
76
77		if (time < range_min || time > range_max)
78			return -ERANGE;
79	}
80
81	return 0;
82}
83
84static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
85{
86	int err;
87
88	if (!rtc->ops) {
89		err = -ENODEV;
90	} else if (!rtc->ops->read_time) {
91		err = -EINVAL;
92	} else {
93		memset(tm, 0, sizeof(struct rtc_time));
94		err = rtc->ops->read_time(rtc->dev.parent, tm);
95		if (err < 0) {
96			dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
97				err);
98			return err;
99		}
100
101		rtc_add_offset(rtc, tm);
102
103		err = rtc_valid_tm(tm);
104		if (err < 0)
105			dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
106	}
107	return err;
108}
109
110int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
111{
112	int err;
113
114	err = mutex_lock_interruptible(&rtc->ops_lock);
115	if (err)
116		return err;
117
118	err = __rtc_read_time(rtc, tm);
119	mutex_unlock(&rtc->ops_lock);
120
121	trace_rtc_read_time(rtc_tm_to_time64(tm), err);
122	return err;
123}
124EXPORT_SYMBOL_GPL(rtc_read_time);
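
/*
 * Minimal usage sketch for an in-kernel consumer (the device name "rtc0"
 * is an assumption and error handling is abbreviated):
 *
 *	struct rtc_device *rtc = rtc_class_open("rtc0");
 *	struct rtc_time tm;
 *
 *	if (rtc && rtc_read_time(rtc, &tm) == 0)
 *		pr_info("RTC time: %ptR\n", &tm);
 *	if (rtc)
 *		rtc_class_close(rtc);
 */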
125
126int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
127{
128	int err, uie;
129
130	err = rtc_valid_tm(tm);
131	if (err != 0)
132		return err;
133
134	err = rtc_valid_range(rtc, tm);
135	if (err)
136		return err;
137
138	rtc_subtract_offset(rtc, tm);
139
140#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
141	uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active;
142#else
143	uie = rtc->uie_rtctimer.enabled;
144#endif
145	if (uie) {
146		err = rtc_update_irq_enable(rtc, 0);
147		if (err)
148			return err;
149	}
150
151	err = mutex_lock_interruptible(&rtc->ops_lock);
152	if (err)
153		return err;
154
155	if (!rtc->ops)
156		err = -ENODEV;
157	else if (rtc->ops->set_time)
158		err = rtc->ops->set_time(rtc->dev.parent, tm);
159	else
160		err = -EINVAL;
161
162	pm_stay_awake(rtc->dev.parent);
163	mutex_unlock(&rtc->ops_lock);
164	/* A timer might have just expired */
165	schedule_work(&rtc->irqwork);
166
167	if (uie) {
168		err = rtc_update_irq_enable(rtc, 1);
169		if (err)
170			return err;
171	}
172
173	trace_rtc_set_time(rtc_tm_to_time64(tm), err);
174	return err;
175}
176EXPORT_SYMBOL_GPL(rtc_set_time);
177
178static int rtc_read_alarm_internal(struct rtc_device *rtc,
179				   struct rtc_wkalrm *alarm)
180{
181	int err;
182
183	err = mutex_lock_interruptible(&rtc->ops_lock);
184	if (err)
185		return err;
186
187	if (!rtc->ops) {
188		err = -ENODEV;
189	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
190		err = -EINVAL;
191	} else {
192		alarm->enabled = 0;
193		alarm->pending = 0;
194		alarm->time.tm_sec = -1;
195		alarm->time.tm_min = -1;
196		alarm->time.tm_hour = -1;
197		alarm->time.tm_mday = -1;
198		alarm->time.tm_mon = -1;
199		alarm->time.tm_year = -1;
200		alarm->time.tm_wday = -1;
201		alarm->time.tm_yday = -1;
202		alarm->time.tm_isdst = -1;
203		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
204	}
205
206	mutex_unlock(&rtc->ops_lock);
207
208	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
209	return err;
210}
211
212int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
213{
214	int err;
215	struct rtc_time before, now;
216	int first_time = 1;
217	time64_t t_now, t_alm;
218	enum { none, day, month, year } missing = none;
219	unsigned int days;
220
221	/* The lower level RTC driver may return -1 in some fields,
222	 * creating invalid alarm->time values, for reasons like:
223	 *
224	 *   - The hardware may not be capable of filling them in;
225	 *     many alarms match only on time-of-day fields, not
226	 *     day/month/year calendar data.
227	 *
228	 *   - Some hardware uses illegal values as "wildcard" match
229	 *     values, which non-Linux firmware (like a BIOS) may try
230	 *     to set up as e.g. "alarm 15 minutes after each hour".
231	 *     Linux uses only oneshot alarms.
232	 *
233	 * When we see that here, we deal with it by using values from
234	 * a current RTC timestamp for any missing (-1) values.  The
235	 * RTC driver prevents "periodic alarm" modes.
236	 *
237	 * But this can be racy, because some fields of the RTC timestamp
238	 * may have wrapped in the interval since we read the RTC alarm,
239	 * which would lead to us inserting inconsistent values in place
240	 * of the -1 fields.
241	 *
242	 * Reading the alarm and timestamp in the reverse sequence
243	 * would have the same race condition, and not solve the issue.
244	 *
245	 * So, we must first read the RTC timestamp,
246	 * then read the RTC alarm value,
247	 * and then read a second RTC timestamp.
248	 *
249	 * If any fields of the second timestamp have changed
250	 * when compared with the first timestamp, then we know
251	 * our timestamp may be inconsistent with that used by
252	 * the low-level rtc_read_alarm_internal() function.
253	 *
254	 * So, when the two timestamps disagree, we just loop and do
255	 * the process again to get a fully consistent set of values.
256	 *
257	 * This could all instead be done in the lower level driver,
258	 * but since more than one lower level RTC implementation needs it,
259	 * it's probably best to do it here instead of there.
260	 */
261
262	/* Get the "before" timestamp */
263	err = rtc_read_time(rtc, &before);
264	if (err < 0)
265		return err;
266	do {
267		if (!first_time)
268			memcpy(&before, &now, sizeof(struct rtc_time));
269		first_time = 0;
270
271		/* get the RTC alarm values, which may be incomplete */
272		err = rtc_read_alarm_internal(rtc, alarm);
273		if (err)
274			return err;
275
276		/* full-function RTCs won't have such missing fields */
277		if (rtc_valid_tm(&alarm->time) == 0) {
278			rtc_add_offset(rtc, &alarm->time);
279			return 0;
280		}
281
282		/* get the "after" timestamp, to detect wrapped fields */
283		err = rtc_read_time(rtc, &now);
284		if (err < 0)
285			return err;
286
287		/* note that tm_sec is a "don't care" value here: */
288	} while (before.tm_min  != now.tm_min ||
289		 before.tm_hour != now.tm_hour ||
290		 before.tm_mon  != now.tm_mon ||
291		 before.tm_year != now.tm_year);
292
293	/* Fill in the missing alarm fields using the timestamp; we
294	 * know there's at least one since alarm->time is invalid.
295	 */
296	if (alarm->time.tm_sec == -1)
297		alarm->time.tm_sec = now.tm_sec;
298	if (alarm->time.tm_min == -1)
299		alarm->time.tm_min = now.tm_min;
300	if (alarm->time.tm_hour == -1)
301		alarm->time.tm_hour = now.tm_hour;
302
303	/* For simplicity, only support date rollover for now */
304	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
305		alarm->time.tm_mday = now.tm_mday;
306		missing = day;
307	}
308	if ((unsigned int)alarm->time.tm_mon >= 12) {
309		alarm->time.tm_mon = now.tm_mon;
310		if (missing == none)
311			missing = month;
312	}
313	if (alarm->time.tm_year == -1) {
314		alarm->time.tm_year = now.tm_year;
315		if (missing == none)
316			missing = year;
317	}
318
319	/* Can't proceed if alarm is still invalid after replacing
320	 * missing fields.
321	 */
322	err = rtc_valid_tm(&alarm->time);
323	if (err)
324		goto done;
325
326	/* with luck, no rollover is needed */
327	t_now = rtc_tm_to_time64(&now);
328	t_alm = rtc_tm_to_time64(&alarm->time);
329	if (t_now < t_alm)
330		goto done;
331
332	switch (missing) {
333	/* 24 hour rollover ... if it's now 10am Monday, an alarm
334	 * that will trigger at 5am will do so at 5am Tuesday, which
335	 * could also be in the next month or year.  This is a common
336	 * case, especially for PCs.
337	 */
338	case day:
339		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
340		t_alm += 24 * 60 * 60;
341		rtc_time64_to_tm(t_alm, &alarm->time);
342		break;
343
344	/* Month rollover ... if it's the 31st, an alarm on the 3rd will
345	 * be next month.  An alarm matching on the 30th, 29th, or 28th
346	 * may end up in the month after that!  Many newer PCs support
347	 * this type of alarm.
348	 */
349	case month:
350		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
351		do {
352			if (alarm->time.tm_mon < 11) {
353				alarm->time.tm_mon++;
354			} else {
355				alarm->time.tm_mon = 0;
356				alarm->time.tm_year++;
357			}
358			days = rtc_month_days(alarm->time.tm_mon,
359					      alarm->time.tm_year);
360		} while (days < alarm->time.tm_mday);
361		break;
362
363	/* Year rollover ... easy except for leap years! */
364	case year:
365		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
366		do {
367			alarm->time.tm_year++;
368		} while (!is_leap_year(alarm->time.tm_year + 1900) &&
369			 rtc_valid_tm(&alarm->time) != 0);
370		break;
371
372	default:
373		dev_warn(&rtc->dev, "alarm rollover not handled\n");
374	}
375
376	err = rtc_valid_tm(&alarm->time);
377
378done:
379	if (err && alarm->enabled)
380		dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
381			 &alarm->time);
382
383	return err;
384}
385
386int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
387{
388	int err;
389
390	err = mutex_lock_interruptible(&rtc->ops_lock);
391	if (err)
392		return err;
393	if (!rtc->ops) {
394		err = -ENODEV;
395	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) {
396		err = -EINVAL;
397	} else {
398		memset(alarm, 0, sizeof(struct rtc_wkalrm));
399		alarm->enabled = rtc->aie_timer.enabled;
400		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
401	}
402	mutex_unlock(&rtc->ops_lock);
403
404	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
405	return err;
406}
407EXPORT_SYMBOL_GPL(rtc_read_alarm);
408
409static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
410{
411	struct rtc_time tm;
412	time64_t now, scheduled;
413	int err;
414
415	err = rtc_valid_tm(&alarm->time);
416	if (err)
417		return err;
418
419	scheduled = rtc_tm_to_time64(&alarm->time);
420
421	/* Make sure we're not setting alarms in the past */
422	err = __rtc_read_time(rtc, &tm);
423	if (err)
424		return err;
425	now = rtc_tm_to_time64(&tm);
426
427	if (scheduled <= now)
428		return -ETIME;
429	/*
430	 * XXX - We just checked to make sure the alarm time is not
431	 * in the past, but there is still a race window: the alarm may be
432	 * set for the next second, and that second may tick over right
433	 * here, before we actually program the alarm.
434	 */
435
436	rtc_subtract_offset(rtc, &alarm->time);
437
438	if (!rtc->ops)
439		err = -ENODEV;
440	else if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
441		err = -EINVAL;
442	else
443		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
444
445	trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
446	return err;
447}
448
449int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
450{
451	ktime_t alarm_time;
452	int err;
453
454	if (!rtc->ops)
455		return -ENODEV;
456	else if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
457		return -EINVAL;
458
459	err = rtc_valid_tm(&alarm->time);
460	if (err != 0)
461		return err;
462
463	err = rtc_valid_range(rtc, &alarm->time);
464	if (err)
465		return err;
466
467	err = mutex_lock_interruptible(&rtc->ops_lock);
468	if (err)
469		return err;
470	if (rtc->aie_timer.enabled)
471		rtc_timer_remove(rtc, &rtc->aie_timer);
472
473	alarm_time = rtc_tm_to_ktime(alarm->time);
474	/*
475	 * Round down so we never miss a deadline, checking for past deadline is
476	 * done in __rtc_set_alarm
477	 */
478	if (test_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features))
479		alarm_time = ktime_sub_ns(alarm_time, (u64)alarm->time.tm_sec * NSEC_PER_SEC);
480
481	rtc->aie_timer.node.expires = alarm_time;
482	rtc->aie_timer.period = 0;
483	if (alarm->enabled)
484		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
485
486	mutex_unlock(&rtc->ops_lock);
487
488	return err;
489}
490EXPORT_SYMBOL_GPL(rtc_set_alarm);
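
/*
 * Usage sketch: program a one-shot alarm five minutes ahead of the current
 * RTC time (the interval is only illustrative; error handling is trimmed):
 *
 *	struct rtc_wkalrm alrm = { .enabled = 1 };
 *	struct rtc_time tm;
 *
 *	if (rtc_read_time(rtc, &tm) == 0) {
 *		rtc_time64_to_tm(rtc_tm_to_time64(&tm) + 5 * 60, &alrm.time);
 *		rtc_set_alarm(rtc, &alrm);
 *	}
 */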
491
492/* Called once per device from rtc_device_register */
493int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
494{
495	int err;
496	struct rtc_time now;
497
498	err = rtc_valid_tm(&alarm->time);
499	if (err != 0)
500		return err;
501
502	err = rtc_read_time(rtc, &now);
503	if (err)
504		return err;
505
506	err = mutex_lock_interruptible(&rtc->ops_lock);
507	if (err)
508		return err;
509
510	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
511	rtc->aie_timer.period = 0;
512
513	/* Alarm has to be enabled & in the future for us to enqueue it */
514	if (alarm->enabled && (rtc_tm_to_ktime(now) <
515			 rtc->aie_timer.node.expires)) {
516		rtc->aie_timer.enabled = 1;
517		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
518		trace_rtc_timer_enqueue(&rtc->aie_timer);
519	}
520	mutex_unlock(&rtc->ops_lock);
521	return err;
522}
523EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
524
525int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
526{
527	int err;
528
529	err = mutex_lock_interruptible(&rtc->ops_lock);
530	if (err)
531		return err;
532
533	if (rtc->aie_timer.enabled != enabled) {
534		if (enabled)
535			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
536		else
537			rtc_timer_remove(rtc, &rtc->aie_timer);
538	}
539
540	if (err)
541		/* nothing */;
542	else if (!rtc->ops)
543		err = -ENODEV;
544	else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable)
545		err = -EINVAL;
546	else
547		err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);
548
549	mutex_unlock(&rtc->ops_lock);
550
551	trace_rtc_alarm_irq_enable(enabled, err);
552	return err;
553}
554EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
555
556int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
557{
558	int err;
559
560	err = mutex_lock_interruptible(&rtc->ops_lock);
561	if (err)
562		return err;
563
564#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
565	if (enabled == 0 && rtc->uie_irq_active) {
566		mutex_unlock(&rtc->ops_lock);
567		return rtc_dev_update_irq_enable_emul(rtc, 0);
568	}
569#endif
570	/* make sure we're changing state */
571	if (rtc->uie_rtctimer.enabled == enabled)
572		goto out;
573
574	if (!test_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features) ||
575	    !test_bit(RTC_FEATURE_ALARM, rtc->features)) {
576		mutex_unlock(&rtc->ops_lock);
577#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
578		return rtc_dev_update_irq_enable_emul(rtc, enabled);
579#else
580		return -EINVAL;
581#endif
582	}
583
584	if (enabled) {
585		struct rtc_time tm;
586		ktime_t now, onesec;
587
588		err = __rtc_read_time(rtc, &tm);
589		if (err)
590			goto out;
591		onesec = ktime_set(1, 0);
592		now = rtc_tm_to_ktime(tm);
593		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
594		rtc->uie_rtctimer.period = ktime_set(1, 0);
595		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
596	} else {
597		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
598	}
599
600out:
601	mutex_unlock(&rtc->ops_lock);
602
603	return err;
604}
605EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
606
607/**
608 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
609 * @rtc: pointer to the rtc device
610 * @num: number of occurrences of the event
611 * @mode: type of the event: RTC_AF, RTC_UF or RTC_PF
612 *
613 * This function is called when an AIE, UIE or PIE mode interrupt
614 * has occurred (or been emulated).
615 *
616 */
617void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
618{
619	unsigned long flags;
620
621	/* mark one irq of the appropriate mode */
622	spin_lock_irqsave(&rtc->irq_lock, flags);
623	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF | mode);
624	spin_unlock_irqrestore(&rtc->irq_lock, flags);
625
626	wake_up_interruptible(&rtc->irq_queue);
627	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
628}
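
/*
 * Worked example of the irq_data encoding above: after two update events
 * have been marked since the last read, the accumulated value is
 * (2 << 8) | RTC_IRQF | RTC_UF, i.e. the event count lives in the upper
 * bytes and the cause flags in the low byte, which is how readers of
 * /dev/rtcN interpret it.
 */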
629
630/**
631 * rtc_aie_update_irq - AIE mode rtctimer hook
632 * @rtc: pointer to the rtc_device
633 *
634 * This function is called when the aie_timer expires.
635 */
636void rtc_aie_update_irq(struct rtc_device *rtc)
637{
638	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
639}
640
641/**
642 * rtc_uie_update_irq - UIE mode rtctimer hook
643 * @rtc: pointer to the rtc_device
644 *
645 * This function is called when the uie_timer expires.
646 */
647void rtc_uie_update_irq(struct rtc_device *rtc)
648{
649	rtc_handle_legacy_irq(rtc, 1,  RTC_UF);
650}
651
652/**
653 * rtc_pie_update_irq - PIE mode hrtimer hook
654 * @timer: pointer to the pie mode hrtimer
655 *
656 * This function is used to emulate PIE mode interrupts
657 * using an hrtimer. This function is called when the periodic
658 * hrtimer expires.
659 */
660enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
661{
662	struct rtc_device *rtc;
663	ktime_t period;
664	u64 count;
665
666	rtc = container_of(timer, struct rtc_device, pie_timer);
667
668	period = NSEC_PER_SEC / rtc->irq_freq;
669	count = hrtimer_forward_now(timer, period);
670
671	rtc_handle_legacy_irq(rtc, count, RTC_PF);
672
673	return HRTIMER_RESTART;
674}
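
/*
 * For example, with irq_freq set to 64 the emulated period computed above
 * is NSEC_PER_SEC / 64 = 15625000 ns (15.625 ms); hrtimer_forward_now()
 * returns the number of whole periods that have elapsed, so ticks missed
 * under load are still accounted for in the legacy irq_data counter.
 */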
675
676/**
677 * rtc_update_irq - Triggered when an RTC interrupt occurs.
678 * @rtc: the rtc device
679 * @num: how many irqs are being reported (usually one)
680 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
681 * Context: any
682 */
683void rtc_update_irq(struct rtc_device *rtc,
684		    unsigned long num, unsigned long events)
685{
686	if (IS_ERR_OR_NULL(rtc))
687		return;
688
689	pm_stay_awake(rtc->dev.parent);
690	schedule_work(&rtc->irqwork);
691}
692EXPORT_SYMBOL_GPL(rtc_update_irq);
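
/*
 * Typical call site, sketched with an assumed handler name: an RTC
 * driver's interrupt handler reports one alarm event like this:
 *
 *	static irqreturn_t foo_rtc_irq(int irq, void *dev_id)
 *	{
 *		struct rtc_device *rtc = dev_id;
 *
 *		rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
 *		return IRQ_HANDLED;
 *	}
 */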
693
694struct rtc_device *rtc_class_open(const char *name)
695{
696	struct device *dev;
697	struct rtc_device *rtc = NULL;
698
699	dev = class_find_device_by_name(rtc_class, name);
700	if (dev)
701		rtc = to_rtc_device(dev);
702
703	if (rtc) {
704		if (!try_module_get(rtc->owner)) {
705			put_device(dev);
706			rtc = NULL;
707		}
708	}
709
710	return rtc;
711}
712EXPORT_SYMBOL_GPL(rtc_class_open);
713
714void rtc_class_close(struct rtc_device *rtc)
715{
716	module_put(rtc->owner);
717	put_device(&rtc->dev);
718}
719EXPORT_SYMBOL_GPL(rtc_class_close);
720
721static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
722{
723	/*
724	 * We always cancel the timer here first, because otherwise
725	 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
726	 * when we manage to start the timer before the callback
727	 * returns HRTIMER_RESTART.
728	 *
729	 * We cannot use hrtimer_cancel() here as a running callback
730	 * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
731	 * would spin forever.
732	 */
733	if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
734		return -1;
735
736	if (enabled) {
737		ktime_t period = NSEC_PER_SEC / rtc->irq_freq;
738
739		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
740	}
741	return 0;
742}
743
744/**
745 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
746 * @rtc: the rtc device
747 * @enabled: true to enable periodic IRQs
748 * Context: any
749 *
750 * Note that rtc_irq_set_freq() should previously have been used to
751 * specify the desired frequency of periodic IRQ.
752 */
753int rtc_irq_set_state(struct rtc_device *rtc, int enabled)
754{
755	int err = 0;
756
757	while (rtc_update_hrtimer(rtc, enabled) < 0)
758		cpu_relax();
759
760	rtc->pie_enabled = enabled;
761
762	trace_rtc_irq_set_state(enabled, err);
763	return err;
764}
765
766/**
767 * rtc_irq_set_freq - set the frequency of the 2^N Hz periodic IRQ
768 * @rtc: the rtc device
769 * @freq: positive frequency
770 * Context: any
771 *
772 * Note that rtc_irq_set_state() is used to enable or disable the
773 * periodic IRQs.
774 */
775int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
776{
777	int err = 0;
778
779	if (freq <= 0 || freq > RTC_MAX_FREQ)
780		return -EINVAL;
781
782	rtc->irq_freq = freq;
783	while (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0)
784		cpu_relax();
785
786	trace_rtc_irq_set_freq(freq, err);
787	return err;
788}
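
/*
 * Example sequence (frequency chosen for illustration only): a caller such
 * as the character-device layer first sets the rate and then enables the
 * emulated periodic interrupt, after which it fires every 125 ms:
 *
 *	rtc_irq_set_freq(rtc, 8);
 *	rtc_irq_set_state(rtc, 1);
 */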
789
790/**
791 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
792 * @rtc: rtc device
793 * @timer: timer being added.
794 *
795 * Enqueues a timer onto the rtc device's timerqueue and sets
796 * the next alarm event appropriately.
797 *
798 * Sets the enabled bit on the added timer.
799 *
800 * Must hold ops_lock for proper serialization of timerqueue
801 */
802static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
803{
804	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
805	struct rtc_time tm;
806	ktime_t now;
807	int err;
808
809	err = __rtc_read_time(rtc, &tm);
810	if (err)
811		return err;
812
813	timer->enabled = 1;
814	now = rtc_tm_to_ktime(tm);
815
816	/* Skip over expired timers */
817	while (next) {
818		if (next->expires >= now)
819			break;
820		next = timerqueue_iterate_next(next);
821	}
822
823	timerqueue_add(&rtc->timerqueue, &timer->node);
824	trace_rtc_timer_enqueue(timer);
825	if (!next || ktime_before(timer->node.expires, next->expires)) {
826		struct rtc_wkalrm alarm;
827
828		alarm.time = rtc_ktime_to_tm(timer->node.expires);
829		alarm.enabled = 1;
830		err = __rtc_set_alarm(rtc, &alarm);
831		if (err == -ETIME) {
832			pm_stay_awake(rtc->dev.parent);
833			schedule_work(&rtc->irqwork);
834		} else if (err) {
835			timerqueue_del(&rtc->timerqueue, &timer->node);
836			trace_rtc_timer_dequeue(timer);
837			timer->enabled = 0;
838			return err;
839		}
840	}
841	return 0;
842}
843
844static void rtc_alarm_disable(struct rtc_device *rtc)
845{
846	if (!rtc->ops || !test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable)
847		return;
848
849	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
850	trace_rtc_alarm_irq_enable(0, 0);
851}
852
853/**
854 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
855 * @rtc: rtc device
856 * @timer: timer being removed.
857 *
858 * Removes a timer from the rtc device's timerqueue and sets
859 * the next alarm event appropriately.
860 *
861 * Clears the enabled bit on the removed timer.
862 *
863 * Must hold ops_lock for proper serialization of timerqueue
864 */
865static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
866{
867	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
868
869	timerqueue_del(&rtc->timerqueue, &timer->node);
870	trace_rtc_timer_dequeue(timer);
871	timer->enabled = 0;
872	if (next == &timer->node) {
873		struct rtc_wkalrm alarm;
874		int err;
875
876		next = timerqueue_getnext(&rtc->timerqueue);
877		if (!next) {
878			rtc_alarm_disable(rtc);
879			return;
880		}
881		alarm.time = rtc_ktime_to_tm(next->expires);
882		alarm.enabled = 1;
883		err = __rtc_set_alarm(rtc, &alarm);
884		if (err == -ETIME) {
885			pm_stay_awake(rtc->dev.parent);
886			schedule_work(&rtc->irqwork);
887		}
888	}
889}
890
891/**
892 * rtc_timer_do_work - Expires rtc timers
893 * @work: work item
894 *
895 * Expires rtc timers. Reprograms next alarm event if needed.
896 * Called via the rtc->irqwork work item.
897 *
898 * Serializes access to timerqueue via ops_lock mutex
899 */
900void rtc_timer_do_work(struct work_struct *work)
901{
902	struct rtc_timer *timer;
903	struct timerqueue_node *next;
904	ktime_t now;
905	struct rtc_time tm;
906
907	struct rtc_device *rtc =
908		container_of(work, struct rtc_device, irqwork);
909
910	mutex_lock(&rtc->ops_lock);
911again:
912	__rtc_read_time(rtc, &tm);
913	now = rtc_tm_to_ktime(tm);
914	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
915		if (next->expires > now)
916			break;
917
918		/* expire timer */
919		timer = container_of(next, struct rtc_timer, node);
920		timerqueue_del(&rtc->timerqueue, &timer->node);
921		trace_rtc_timer_dequeue(timer);
922		timer->enabled = 0;
923		if (timer->func)
924			timer->func(timer->rtc);
925
926		trace_rtc_timer_fired(timer);
927		/* Re-add/fwd periodic timers */
928		if (ktime_to_ns(timer->period)) {
929			timer->node.expires = ktime_add(timer->node.expires,
930							timer->period);
931			timer->enabled = 1;
932			timerqueue_add(&rtc->timerqueue, &timer->node);
933			trace_rtc_timer_enqueue(timer);
934		}
935	}
936
937	/* Set next alarm */
938	if (next) {
939		struct rtc_wkalrm alarm;
940		int err;
941		int retry = 3;
942
943		alarm.time = rtc_ktime_to_tm(next->expires);
944		alarm.enabled = 1;
945reprogram:
946		err = __rtc_set_alarm(rtc, &alarm);
947		if (err == -ETIME) {
948			goto again;
949		} else if (err) {
950			if (retry-- > 0)
951				goto reprogram;
952
953			timer = container_of(next, struct rtc_timer, node);
954			timerqueue_del(&rtc->timerqueue, &timer->node);
955			trace_rtc_timer_dequeue(timer);
956			timer->enabled = 0;
957			dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
958			goto again;
959		}
960	} else {
961		rtc_alarm_disable(rtc);
962	}
963
964	pm_relax(rtc->dev.parent);
965	mutex_unlock(&rtc->ops_lock);
966}
967
968/* rtc_timer_init - Initializes an rtc_timer
969 * @timer: timer to be initialized
970 * @f: function pointer to be called when timer fires
971 * @rtc: pointer to the rtc_device
972 *
973 * Kernel interface for initializing an rtc_timer.
974 */
975void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r),
976		    struct rtc_device *rtc)
977{
978	timerqueue_init(&timer->node);
979	timer->enabled = 0;
980	timer->func = f;
981	timer->rtc = rtc;
982}
983
984/* rtc_timer_start - Sets an rtc_timer to fire in the future
985 * @rtc: rtc device to be used
986 * @timer: timer being set
987 * @expires: time at which to expire the timer
988 * @period: period at which the timer recurs
989 *
990 * Kernel interface to set an rtc_timer
991 */
992int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
993		    ktime_t expires, ktime_t period)
994{
995	int ret = 0;
996
997	mutex_lock(&rtc->ops_lock);
998	if (timer->enabled)
999		rtc_timer_remove(rtc, timer);
1000
1001	timer->node.expires = expires;
1002	timer->period = period;
1003
1004	ret = rtc_timer_enqueue(rtc, timer);
1005
1006	mutex_unlock(&rtc->ops_lock);
1007	return ret;
1008}
1009
1010/* rtc_timer_cancel - Stops an rtc_timer
1011 * @rtc: rtc device to be used
1012 * @timer: timer being stopped
1013 *
1014 * Kernel interface to cancel an rtc_timer
1015 */
1016void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
1017{
1018	mutex_lock(&rtc->ops_lock);
1019	if (timer->enabled)
1020		rtc_timer_remove(rtc, timer);
1021	mutex_unlock(&rtc->ops_lock);
1022}
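
/*
 * Minimal in-kernel sketch for the rtc_timer API (the callback name and
 * the 30-second expiry are assumptions; expiry is expressed in the RTC's
 * own timebase, hence the read-then-program pattern):
 *
 *	static void my_timer_fn(struct rtc_device *rtc) { ... }
 *
 *	struct rtc_timer t;
 *	struct rtc_time tm;
 *
 *	rtc_timer_init(&t, my_timer_fn, rtc);
 *	if (rtc_read_time(rtc, &tm) == 0)
 *		rtc_timer_start(rtc, &t,
 *				ktime_add(rtc_tm_to_ktime(tm), ktime_set(30, 0)),
 *				0);
 *	...
 *	rtc_timer_cancel(rtc, &t);
 */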
1023
1024/**
1025 * rtc_read_offset - Read the amount of rtc offset in parts per billion
1026 * @rtc: rtc device to be used
1027 * @offset: the offset in parts per billion
1028 *
1029 * See rtc_set_offset() below for details of the offset semantics.
1030 *
1031 * Kernel interface to read rtc clock offset
1032 * Returns 0 on success, or a negative number on error.
1033 * If read_offset() is not implemented for the rtc, return -EINVAL
1034 */
1035int rtc_read_offset(struct rtc_device *rtc, long *offset)
1036{
1037	int ret;
1038
1039	if (!rtc->ops)
1040		return -ENODEV;
1041
1042	if (!rtc->ops->read_offset)
1043		return -EINVAL;
1044
1045	mutex_lock(&rtc->ops_lock);
1046	ret = rtc->ops->read_offset(rtc->dev.parent, offset);
1047	mutex_unlock(&rtc->ops_lock);
1048
1049	trace_rtc_read_offset(*offset, ret);
1050	return ret;
1051}
1052
1053/**
1054 * rtc_set_offset - Adjusts the duration of the average second
1055 * @rtc: rtc device to be used
1056 * @offset: the offset in parts per billion
1057 *
1058 * Some RTCs allow an adjustment to the average duration of a second
1059 * to compensate for differences in the actual clock rate due to temperature,
1060 * the crystal, capacitor, etc.
1061 *
1062 * The adjustment applied is as follows:
1063 *   t = t0 * (1 + offset * 1e-9)
1064 * where t0 is the measured length of 1 RTC second with offset = 0
1065 *
1066 * Kernel interface to adjust an rtc clock offset.
1067 * Return 0 on success, or a negative number on error.
1068 * If the rtc offset is not settable (or not implemented), return -EINVAL
1069 */
1070int rtc_set_offset(struct rtc_device *rtc, long offset)
1071{
1072	int ret;
1073
1074	if (!rtc->ops)
1075		return -ENODEV;
1076
1077	if (!rtc->ops->set_offset)
1078		return -EINVAL;
1079
1080	mutex_lock(&rtc->ops_lock);
1081	ret = rtc->ops->set_offset(rtc->dev.parent, offset);
1082	mutex_unlock(&rtc->ops_lock);
1083
1084	trace_rtc_set_offset(offset, ret);
1085	return ret;
1086}
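
/*
 * Worked example of the adjustment formula above: offset = 1000 (parts per
 * billion) stretches every RTC second by a factor of 1.000001, i.e. by
 * 1 us, so over one day the adjusted clock runs about 86400 * 1e-6 = 0.0864
 * seconds slower than before.  A clock that had been gaining roughly 86 ms
 * per day could therefore be corrected with rtc_set_offset(rtc, 1000),
 * subject to the granularity and range limits of the driver's set_offset()
 * implementation.
 */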
1087