Lines matching refs:tk in kernel/time/timekeeping.c. Each entry is a source line referencing the struct timekeeper pointer tk; the leading number is that line's position in the file.
117 static inline void tk_normalize_xtime(struct timekeeper *tk)
119 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
120 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
121 tk->xtime_sec++;
123 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
124 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
125 tk->raw_sec++;
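
The loop above (lines 119-125) keeps the shifted-nanosecond accumulators below one second, carrying overflow into the seconds counters. A minimal userspace sketch of the same carry, with a simplified stand-in struct (tk_sketch is illustrative, not a kernel type):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Simplified stand-in for the timekeeper fields the loop touches. */
    struct tk_sketch {
        uint64_t xtime_sec;
        uint64_t xtime_nsec;    /* nanoseconds << shift */
        uint32_t shift;
    };

    static void tk_normalize_sketch(struct tk_sketch *tk)
    {
        uint64_t nsecps = NSEC_PER_SEC << tk->shift;

        /* Carry whole (shifted) seconds out of the nsec accumulator. */
        while (tk->xtime_nsec >= nsecps) {
            tk->xtime_nsec -= nsecps;
            tk->xtime_sec++;
        }
    }

    int main(void)
    {
        struct tk_sketch tk = { .xtime_sec = 0,
                                .xtime_nsec = (2500000000ULL << 8) + 42,
                                .shift = 8 };

        tk_normalize_sketch(&tk);
        printf("%llu sec, %llu ns\n",
               (unsigned long long)tk.xtime_sec,
               (unsigned long long)(tk.xtime_nsec >> tk.shift));
        return 0;
    }
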
129 static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
133 ts.tv_sec = tk->xtime_sec;
134 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
138 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
140 tk->xtime_sec = ts->tv_sec;
141 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
144 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
146 tk->xtime_sec += ts->tv_sec;
147 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
148 tk_normalize_xtime(tk);
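
tk_xtime(), tk_set_xtime() and tk_xtime_add() (lines 129-148) convert between plain timespec nanoseconds and the left-shifted internal representation: readers shift right, writers shift left, and additions renormalize afterwards. A hedged round-trip sketch (names below are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct tk_sketch {
        int64_t  xtime_sec;
        uint64_t xtime_nsec;    /* nanoseconds << shift */
        uint32_t shift;
    };

    /* Read path (cf. tk_xtime): shift right to recover integer ns. */
    static struct timespec tk_get(const struct tk_sketch *tk)
    {
        struct timespec ts;

        ts.tv_sec  = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
        return ts;
    }

    /* Write path (cf. tk_set_xtime): shift the new value left into
     * the high-precision representation. */
    static void tk_set(struct tk_sketch *tk, const struct timespec *ts)
    {
        tk->xtime_sec  = ts->tv_sec;
        tk->xtime_nsec = (uint64_t)ts->tv_nsec << tk->shift;
    }

    int main(void)
    {
        struct tk_sketch tk = { .shift = 8 };
        struct timespec in = { .tv_sec = 5, .tv_nsec = 999999999 }, out;

        tk_set(&tk, &in);
        out = tk_get(&tk);
        printf("%ld.%09ld\n", (long)out.tv_sec, out.tv_nsec);
        return 0;
    }
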
151 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
159 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
160 -tk->wall_to_monotonic.tv_nsec);
161 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
162 tk->wall_to_monotonic = wtm;
164 tk->offs_real = timespec64_to_ktime(tmp);
165 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
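
tk_set_wall_to_mono() (lines 151-165) maintains the invariant that offs_real is exactly the negated wall_to_monotonic offset (so CLOCK_REALTIME = CLOCK_MONOTONIC + offs_real), then re-derives offs_tai from it. A small check of the negation invariant under timespec normalization, where normalize() stands in for set_normalized_timespec64():

    #include <assert.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000L

    struct ts64 { int64_t sec; long nsec; };

    /* Keep 0 <= nsec < NSEC_PER_SEC, borrowing from/into seconds. */
    static struct ts64 normalize(int64_t sec, long nsec)
    {
        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
        while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
        return (struct ts64){ sec, nsec };
    }

    int main(void)
    {
        /* wall_to_monotonic is typically negative: mono = wall + wtm. */
        struct ts64 wtm = normalize(-1700000000, -250000000);
        /* offs_real must be exactly -wtm: real = mono + offs_real. */
        struct ts64 offs_real = normalize(-wtm.sec, -wtm.nsec);

        int64_t wtm_ns  = wtm.sec * NSEC_PER_SEC + wtm.nsec;
        int64_t offs_ns = offs_real.sec * NSEC_PER_SEC + offs_real.nsec;

        assert(wtm_ns + offs_ns == 0);
        return 0;
    }
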
168 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
170 tk->offs_boot = ktime_add(tk->offs_boot, delta);
175 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
201 static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
204 u64 max_cycles = tk->tkr_mono.clock->max_cycles;
205 const char *name = tk->tkr_mono.clock->name;
219 if (tk->underflow_seen) {
220 if (jiffies - tk->last_warning > WARNING_FREQ) {
224 tk->last_warning = jiffies;
226 tk->underflow_seen = 0;
229 if (tk->overflow_seen) {
230 if (jiffies - tk->last_warning > WARNING_FREQ) {
234 tk->last_warning = jiffies;
236 tk->overflow_seen = 0;
242 struct timekeeper *tk = &tk_core.timekeeper;
268 tk->underflow_seen = 1;
274 tk->overflow_seen = 1;
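
timekeeping_check_update() (lines 201-236) reports the underflow/overflow flags set in the hot read paths (lines 268 and 274), rate-limited via jiffies so a misbehaving clocksource cannot flood the log. A rough userspace analogue of the flag-then-rate-limited-report pattern, with WARN_INTERVAL_NS playing the role of WARNING_FREQ:

    #include <stdio.h>
    #include <time.h>

    #define WARN_INTERVAL_NS 1000000000LL

    /* Hot path sets a flag; the slow path reports it at most once
     * per interval, mirroring lines 219-236. */
    static int underflow_seen;
    static long long last_warning_ns = -WARN_INTERVAL_NS;

    static long long now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    static void check_update(void)
    {
        if (underflow_seen) {
            if (now_ns() - last_warning_ns > WARN_INTERVAL_NS) {
                fprintf(stderr, "clocksource delta underflow\n");
                last_warning_ns = now_ns();
            }
            underflow_seen = 0;
        }
    }

    int main(void)
    {
        underflow_seen = 1;    /* as the hot path would (line 268) */
        check_update();
        check_update();        /* flag cleared: no second warning */
        return 0;
    }
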
281 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
301 * @tk: The target timekeeper to setup.
309 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
315 ++tk->cs_was_changed_seq;
316 old_clock = tk->tkr_mono.clock;
317 tk->tkr_mono.clock = clock;
318 tk->tkr_mono.mask = clock->mask;
319 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
321 tk->tkr_raw.clock = clock;
322 tk->tkr_raw.mask = clock->mask;
323 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
335 tk->cycle_interval = interval;
338 tk->xtime_interval = interval * clock->mult;
339 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
340 tk->raw_interval = interval * clock->mult;
346 tk->tkr_mono.xtime_nsec >>= -shift_change;
347 tk->tkr_raw.xtime_nsec >>= -shift_change;
349 tk->tkr_mono.xtime_nsec <<= shift_change;
350 tk->tkr_raw.xtime_nsec <<= shift_change;
354 tk->tkr_mono.shift = clock->shift;
355 tk->tkr_raw.shift = clock->shift;
357 tk->ntp_error = 0;
358 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
359 tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
366 tk->tkr_mono.mult = clock->mult;
367 tk->tkr_raw.mult = clock->mult;
368 tk->ntp_err_mult = 0;
369 tk->skip_second_overflow = 0;
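
Within tk_setup_internals() (lines 309-369), lines 346-350 handle a clocksource change where the new clock uses a different shift: the saved xtime_nsec remainder is expressed in old-shift units and must be rescaled before the new shift is installed at lines 354-355. A sketch of that rescale (rescale() is a made-up helper):

    #include <stdint.h>
    #include <stdio.h>

    /* On a shift change, the stored shifted-nanosecond remainder must
     * be re-expressed in new-shift units, mirroring lines 346-350. */
    static uint64_t rescale(uint64_t xtime_nsec, int old_shift, int new_shift)
    {
        int shift_change = new_shift - old_shift;

        if (shift_change < 0)
            return xtime_nsec >> -shift_change;
        return xtime_nsec << shift_change;
    }

    int main(void)
    {
        uint64_t ns = 123456789ULL << 10;    /* remainder at shift 10 */

        /* Same value re-expressed at shift 8 and shift 12. */
        printf("%llu %llu\n",
               (unsigned long long)(rescale(ns, 10, 8) >> 8),
               (unsigned long long)(rescale(ns, 10, 12) >> 12));
        return 0;
    }

Both prints recover the original 123456789 ns, since only the representation changes.
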
521 * __timekeeping_inject_sleeptime(tk, delta);
523 * timekeeping_update(tk, TK_CLEAR_NTP...);
525 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
526 * partially updated. Since the tk->offs_boot update is a rare event, this
534 struct timekeeper *tk = &tk_core.timekeeper;
536 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot)));
551 struct timekeeper *tk = &tk_core.timekeeper;
553 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai)));
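
ktime_get_boot_fast_ns() and ktime_get_tai_fast_ns() (lines 536 and 553) deliberately read offs_boot/offs_tai outside any seqcount; the comment at lines 521-526 argues that a torn 64-bit read on 32-bit is acceptable because the offset changes only across rare events. A userspace analogue of the pattern, using a relaxed C11 atomic where the kernel uses data_race():

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t mono_fast_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* offs_boot changes only across suspend/resume; a racing reader
     * may see the old or the new offset, which is acceptable. The
     * relaxed atomic plays the role of the kernel's data_race(). */
    static _Atomic int64_t offs_boot_ns;

    static uint64_t boot_fast_ns(void)
    {
        return mono_fast_ns() +
               (uint64_t)atomic_load_explicit(&offs_boot_ns,
                                              memory_order_relaxed);
    }

    int main(void)
    {
        atomic_store_explicit(&offs_boot_ns, 5000000000LL,
                              memory_order_relaxed);
        printf("%llu\n", (unsigned long long)boot_fast_ns());
        return 0;
    }
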
635 struct timekeeper *tk = &tk_core.timekeeper;
638 snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
643 * @tk: Timekeeper to snapshot.
646 * suspended, so take a snapshot of the readout base of @tk and use it as the
651 static void halt_fast_timekeeper(const struct timekeeper *tk)
654 const struct tk_read_base *tkr = &tk->tkr_mono;
659 tkr_dummy.base_real = tkr->base + tk->offs_real;
662 tkr = &tk->tkr_raw;
670 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
672 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
681 struct timekeeper *tk = &tk_core.timekeeper;
687 update_pvclock_gtod(tk, true);
715 static inline void tk_update_leap_state(struct timekeeper *tk)
717 tk->next_leap_ktime = ntp_get_next_leap();
718 if (tk->next_leap_ktime != KTIME_MAX)
720 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
726 static inline void tk_update_ktime_data(struct timekeeper *tk)
738 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
739 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
740 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
745 * this into account before updating tk->ktime_sec.
747 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
750 tk->ktime_sec = seconds;
753 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
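
tk_update_ktime_data() (lines 726-753) precomputes the ktime bases: tkr_mono.base is xtime plus wall_to_monotonic, ktime_sec caches whole monotonic seconds, and tkr_raw.base comes from raw_sec. The subtle step is that the pending xtime_nsec remainder can push the monotonic sum past a second boundary before ktime_sec is cached. A numeric sketch of that rounding (values are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        /* Illustrative values, not taken from a real timekeeper. */
        int64_t xtime_sec = 1000, wtm_sec = -400;
        uint32_t wtm_nsec = 900000000, shift = 8;
        uint64_t xtime_nsec = 300000000ULL << shift;

        uint64_t seconds = (uint64_t)(xtime_sec + wtm_sec);
        uint32_t nsec = wtm_nsec;

        /* The pending shifted-nsec remainder may cross a second
         * boundary; fold it in before caching ktime_sec
         * (cf. lines 745-750). */
        nsec += (uint32_t)(xtime_nsec >> shift);
        seconds += nsec / NSEC_PER_SEC;

        printf("ktime_sec = %llu\n", (unsigned long long)seconds);
        return 0;
    }
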
757 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
760 tk->ntp_error = 0;
764 tk_update_leap_state(tk);
765 tk_update_ktime_data(tk);
767 update_vsyscall(tk);
768 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
770 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
771 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
772 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
775 tk->clock_was_set_seq++;
788 * @tk: Pointer to the timekeeper to update
794 static void timekeeping_forward_now(struct timekeeper *tk)
798 cycle_now = tk_clock_read(&tk->tkr_mono);
799 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
800 tk->tkr_mono.cycle_last = cycle_now;
801 tk->tkr_raw.cycle_last = cycle_now;
803 tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
804 tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
806 tk_normalize_xtime(tk);
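
timekeeping_forward_now() (lines 794-806) is the core accumulation step: read the clocksource, mask the cycle delta against cycle_last so narrow counters wrap correctly, multiply into shifted nanoseconds, and renormalize. A self-contained sketch with an invented 32-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct tkr_sketch {
        uint64_t cycle_last;
        uint64_t mask;          /* counter width mask, here 32-bit */
        uint32_t mult, shift;   /* ns = (cycles * mult) >> shift */
        uint64_t xtime_nsec;    /* shifted nanoseconds */
        uint64_t xtime_sec;
    };

    /* Mirror of the forward-now step on one readout base: the masked
     * delta handles wraparound of a narrow counter. */
    static void forward_now(struct tkr_sketch *t, uint64_t cycle_now)
    {
        uint64_t delta = (cycle_now - t->cycle_last) & t->mask;

        t->cycle_last = cycle_now;
        t->xtime_nsec += delta * t->mult;
        while (t->xtime_nsec >= (NSEC_PER_SEC << t->shift)) {
            t->xtime_nsec -= NSEC_PER_SEC << t->shift;
            t->xtime_sec++;
        }
    }

    int main(void)
    {
        /* 1 MHz counter: mult/shift chosen so 1 cycle = 1000 ns. */
        struct tkr_sketch t = { .cycle_last = 0xFFFFFF00,
                                .mask = 0xFFFFFFFF,
                                .mult = 1000 << 8, .shift = 8 };

        forward_now(&t, 0x00000100);    /* counter wrapped */
        printf("%llu sec + %llu ns\n",
               (unsigned long long)t.xtime_sec,
               (unsigned long long)(t.xtime_nsec >> t.shift));
        return 0;
    }
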
817 struct timekeeper *tk = &tk_core.timekeeper;
826 ts->tv_sec = tk->xtime_sec;
827 nsecs = timekeeping_get_ns(&tk->tkr_mono);
838 struct timekeeper *tk = &tk_core.timekeeper;
847 base = tk->tkr_mono.base;
848 nsecs = timekeeping_get_ns(&tk->tkr_mono);
858 struct timekeeper *tk = &tk_core.timekeeper;
866 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
881 struct timekeeper *tk = &tk_core.timekeeper;
890 base = ktime_add(tk->tkr_mono.base, *offset);
891 nsecs = timekeeping_get_ns(&tk->tkr_mono);
902 struct timekeeper *tk = &tk_core.timekeeper;
911 base = ktime_add(tk->tkr_mono.base, *offset);
912 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
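
Every getter above (lines 817-912, and most that follow) uses the same lockless pattern: sample the timekeeper inside a seqcount read section and retry if a writer intervened. A hand-rolled userspace sketch of that read loop with C11 atomics; this is not the kernel's seqcount API, and the plain payload read is a simplification that a real implementation would protect with READ_ONCE-style accesses:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic unsigned int tk_seq; /* even = stable, odd = writer */
    static int64_t base_ns;             /* payload guarded by tk_seq */

    static unsigned int read_begin(void)
    {
        unsigned int seq;

        /* Wait out a writer (odd sequence), pairing with its release. */
        while ((seq = atomic_load_explicit(&tk_seq,
                                           memory_order_acquire)) & 1)
            ;
        return seq;
    }

    static int read_retry(unsigned int seq)
    {
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&tk_seq,
                                    memory_order_relaxed) != seq;
    }

    static int64_t get_time_ns(void)
    {
        unsigned int seq;
        int64_t ns;

        do {
            seq = read_begin();
            ns = base_ns;    /* sampled inside the read section */
        } while (read_retry(seq));

        return ns;
    }

    int main(void)
    {
        base_ns = 42;
        return get_time_ns() == 42 ? 0 : 1;
    }
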
945 struct timekeeper *tk = &tk_core.timekeeper;
952 base = tk->tkr_raw.base;
953 nsecs = timekeeping_get_ns(&tk->tkr_raw);
971 struct timekeeper *tk = &tk_core.timekeeper;
980 ts->tv_sec = tk->xtime_sec;
981 nsec = timekeeping_get_ns(&tk->tkr_mono);
982 tomono = tk->wall_to_monotonic;
996 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
1003 struct timekeeper *tk = &tk_core.timekeeper;
1006 return tk->ktime_sec;
1015 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
1017 * counter to provide "atomic" access to the 64bit tk->xtime_sec
1022 struct timekeeper *tk = &tk_core.timekeeper;
1027 return tk->xtime_sec;
1031 seconds = tk->xtime_sec;
1046 struct timekeeper *tk = &tk_core.timekeeper;
1048 return tk->xtime_sec;
1057 struct timekeeper *tk = &tk_core.timekeeper;
1069 now = tk_clock_read(&tk->tkr_mono);
1070 systime_snapshot->cs_id = tk->tkr_mono.clock->id;
1071 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
1072 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
1073 base_real = ktime_add(tk->tkr_mono.base,
1075 base_raw = tk->tkr_raw.base;
1076 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
1077 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
1126 struct timekeeper *tk = &tk_core.timekeeper;
1160 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
1216 struct timekeeper *tk = &tk_core.timekeeper;
1241 if (tk->tkr_mono.clock != system_counterval.cs)
1249 now = tk_clock_read(&tk->tkr_mono);
1250 interval_start = tk->tkr_mono.cycle_last;
1252 clock_was_set_seq = tk->clock_was_set_seq;
1253 cs_was_changed_seq = tk->cs_was_changed_seq;
1260 base_real = ktime_add(tk->tkr_mono.base,
1262 base_raw = tk->tkr_raw.base;
1264 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
1265 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
1314 struct timekeeper *tk = &tk_core.timekeeper;
1325 timekeeping_forward_now(tk);
1327 xt = tk_xtime(tk);
1330 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1335 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
1337 tk_set_xtime(tk, ts);
1339 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1364 struct timekeeper *tk = &tk_core.timekeeper;
1375 timekeeping_forward_now(tk);
1378 tmp = timespec64_add(tk_xtime(tk), *ts);
1379 if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
1385 tk_xtime_add(tk, ts);
1386 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
1389 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
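
timekeeping_inject_offset() (lines 1364-1389) moves the wall clock by *ts while moving wall_to_monotonic by -*ts, so CLOCK_MONOTONIC stays continuous across the adjustment. The invariant, sketched in whole seconds:

    #include <assert.h>
    #include <stdint.h>

    /* Offset injection: wall moves by +delta, wall_to_monotonic by
     * -delta, so mono = wall + wtm is unchanged. */
    int main(void)
    {
        int64_t xtime = 1000, wtm = -400, delta = 25;
        int64_t mono_before = xtime + wtm;

        xtime += delta;    /* cf. tk_xtime_add(tk, ts) */
        wtm -= delta;      /* cf. tk_set_wall_to_mono(sub(wtm, *ts)) */

        assert(xtime + wtm == mono_before);
        return 0;
    }
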
1437 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1439 tk->tai_offset = tai_offset;
1440 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1450 struct timekeeper *tk = &tk_core.timekeeper;
1471 timekeeping_forward_now(tk);
1474 old = tk->tkr_mono.clock;
1475 tk_setup_internals(tk, new);
1478 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1502 struct timekeeper *tk = &tk_core.timekeeper;
1504 if (tk->tkr_mono.clock == clock)
1508 return tk->tkr_mono.clock == clock ? 0 : -1;
1519 struct timekeeper *tk = &tk_core.timekeeper;
1525 ts->tv_sec = tk->raw_sec;
1526 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1541 struct timekeeper *tk = &tk_core.timekeeper;
1548 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1560 struct timekeeper *tk = &tk_core.timekeeper;
1567 ret = tk->tkr_mono.clock->max_idle_ns;
1635 struct timekeeper *tk = &tk_core.timekeeper;
1664 tk_setup_internals(tk, clock);
1666 tk_set_xtime(tk, &wall_time);
1667 tk->raw_sec = 0;
1669 tk_set_wall_to_mono(tk, wall_to_mono);
1671 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1682 * @tk: Pointer to the timekeeper to be updated
1688 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1697 tk_xtime_add(tk, delta);
1698 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1699 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
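
__timekeeping_inject_sleeptime() (lines 1688-1699) applies the same xtime/wall_to_monotonic adjustment and additionally advances offs_boot, so CLOCK_BOOTTIME reflects the slept interval while CLOCK_MONOTONIC does not. Again in whole seconds:

    #include <assert.h>
    #include <stdint.h>

    /* Sleep-time injection: wall time and the boot offset both
     * advance by the slept interval; monotonic time does not. */
    int main(void)
    {
        int64_t xtime = 5000, wtm = -2000, offs_boot = 30, delta = 120;
        int64_t mono = xtime + wtm, boot = mono + offs_boot;

        xtime += delta;
        wtm -= delta;
        offs_boot += delta;

        assert(xtime + wtm == mono);                    /* mono unchanged */
        assert(xtime + wtm + offs_boot == boot + delta); /* boot advanced */
        return 0;
    }
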
1752 struct timekeeper *tk = &tk_core.timekeeper;
1760 timekeeping_forward_now(tk);
1762 __timekeeping_inject_sleeptime(tk, delta);
1764 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1779 struct timekeeper *tk = &tk_core.timekeeper;
1780 struct clocksource *clock = tk->tkr_mono.clock;
1806 cycle_now = tk_clock_read(&tk->tkr_mono);
1818 __timekeeping_inject_sleeptime(tk, &ts_delta);
1822 tk->tkr_mono.cycle_last = cycle_now;
1823 tk->tkr_raw.cycle_last = cycle_now;
1825 tk->ntp_error = 0;
1827 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1841 struct timekeeper *tk = &tk_core.timekeeper;
1862 timekeeping_forward_now(tk);
1870 curr_clock = tk->tkr_mono.clock;
1871 cycle_now = tk->tkr_mono.cycle_last;
1881 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1896 timekeeping_update(tk, TK_MIRROR);
1897 halt_fast_timekeeper(tk);
1924 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
1928 s64 interval = tk->cycle_interval;
1987 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
1993 tk->tkr_mono.mult += mult_adj;
1994 tk->xtime_interval += interval;
1995 tk->tkr_mono.xtime_nsec -= offset;
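
timekeeping_apply_adjustment() (lines 1924-1995) changes tkr_mono.mult mid-interval. Since the cycles already elapsed since cycle_last will now be converted with the new mult, xtime_nsec must drop the over-counted amount to keep "now" continuous; in the kernel, the offset subtracted at line 1995 has already been scaled by mult_adj, while the sketch below scales it explicitly:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t xtime_nsec = 5000000ULL << 8;  /* shifted ns accumulator */
        uint32_t mult = 1000, mult_adj = 3;
        uint64_t offset = 12345;                /* cycles since cycle_last */

        /* Time right now, as a reader would compute it (shifted ns). */
        uint64_t now_before = xtime_nsec + offset * mult;

        /* Apply the adjustment with the compensating fixup. */
        mult += mult_adj;
        xtime_nsec -= offset * mult_adj;

        uint64_t now_after = xtime_nsec + offset * mult;

        assert(now_after == now_before);
        return 0;
    }
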
2002 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
2010 if (likely(tk->ntp_tick == ntp_tick_length())) {
2011 mult = tk->tkr_mono.mult - tk->ntp_err_mult;
2013 tk->ntp_tick = ntp_tick_length();
2014 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
2015 tk->xtime_remainder, tk->cycle_interval);
2024 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
2025 mult += tk->ntp_err_mult;
2027 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
2029 if (unlikely(tk->tkr_mono.clock->maxadj &&
2030 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
2031 > tk->tkr_mono.clock->maxadj))) {
2034 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
2035 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
2048 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
2049 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
2050 tk->tkr_mono.shift;
2051 tk->xtime_sec--;
2052 tk->skip_second_overflow = 1;
2063 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2065 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
2068 while (tk->tkr_mono.xtime_nsec >= nsecps) {
2071 tk->tkr_mono.xtime_nsec -= nsecps;
2072 tk->xtime_sec++;
2078 if (unlikely(tk->skip_second_overflow)) {
2079 tk->skip_second_overflow = 0;
2084 leap = second_overflow(tk->xtime_sec);
2088 tk->xtime_sec += leap;
2092 tk_set_wall_to_mono(tk,
2093 timespec64_sub(tk->wall_to_monotonic, ts));
2095 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
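
accumulate_nsecs_to_secs() (lines 2063-2095) folds whole seconds out of the shifted accumulator and lets the NTP core fire leap seconds at second boundaries; on a leap, xtime jumps while wall_to_monotonic and the TAI offset move the other way. A rough sketch in whole seconds, where second_overflow_stub() is a hypothetical stand-in for the NTP-driven second_overflow(), assumed here to return -1 once for an inserted leap second:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for second_overflow(). */
    static int second_overflow_stub(int64_t sec)
    {
        static int fired;

        if (sec >= 1003 && !fired) {
            fired = 1;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int64_t xtime_sec = 1000, wtm_sec = -400, tai_offset = 37;

        for (int tick = 0; tick < 5; tick++) {
            int leap = second_overflow_stub(++xtime_sec);

            if (leap) {
                /* cf. lines 2088-2095: UTC jumps back, monotonic
                 * time stays continuous, TAI-UTC grows by one. */
                xtime_sec += leap;
                wtm_sec -= leap;
                tai_offset -= leap;
            }
        }
        printf("xtime=%lld wtm=%lld tai=%lld\n", (long long)xtime_sec,
               (long long)wtm_sec, (long long)tai_offset);
        return 0;
    }
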
2112 static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
2115 u64 interval = tk->cycle_interval << shift;
2124 tk->tkr_mono.cycle_last += interval;
2125 tk->tkr_raw.cycle_last += interval;
2127 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
2128 *clock_set |= accumulate_nsecs_to_secs(tk);
2131 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
2132 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
2133 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
2134 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
2135 tk->raw_sec++;
2139 tk->ntp_error += tk->ntp_tick << shift;
2140 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
2141 (tk->ntp_error_shift + shift);
2153 struct timekeeper *tk = &shadow_timekeeper;
2165 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2166 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2173 timekeeping_check_update(tk, offset);
2183 shift = ilog2(offset) - ilog2(tk->cycle_interval);
2188 while (offset >= tk->cycle_interval) {
2189 offset = logarithmic_accumulation(tk, offset, shift,
2191 if (offset < tk->cycle_interval<<shift)
2196 timekeeping_adjust(tk, offset);
2202 clock_set |= accumulate_nsecs_to_secs(tk);
2215 timekeeping_update(tk, clock_set);
2216 memcpy(real_tk, tk, sizeof(*tk));
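
timekeeping_advance() (lines 2153-2216) accumulates the pending cycle offset in the largest power-of-two multiples of cycle_interval that fit, starting from shift = ilog2(offset) - ilog2(cycle_interval) and shrinking the chunk whenever it overshoots (line 2191), so a long tickless period costs O(log n) loop passes rather than one per tick; the work happens on the shadow timekeeper and is copied back at line 2216. A standalone sketch of the chunking loop:

    #include <stdint.h>
    #include <stdio.h>

    static int ilog2_u64(uint64_t v)
    {
        int l = -1;

        while (v) {
            v >>= 1;
            l++;
        }
        return l;
    }

    int main(void)
    {
        uint64_t cycle_interval = 1000;   /* cycles per tick */
        uint64_t offset = 1234567;        /* pending unaccumulated cycles */
        uint64_t accumulated = 0;
        int shift, chunks = 0;

        /* Largest power-of-two multiple of the tick that could fit
         * the pending offset (cf. line 2183). */
        shift = ilog2_u64(offset) - ilog2_u64(cycle_interval);
        if (shift < 0)
            shift = 0;

        while (offset >= cycle_interval) {
            uint64_t chunk = cycle_interval << shift;

            if (offset >= chunk) {    /* cf. logarithmic_accumulation() */
                offset -= chunk;
                accumulated += chunk;
                chunks++;
            }
            if (offset < cycle_interval << shift)    /* cf. line 2191 */
                shift--;
            if (shift < 0)
                shift = 0;
        }
        printf("accumulated %llu cycles in %d chunks, %llu left\n",
               (unsigned long long)accumulated, chunks,
               (unsigned long long)offset);
        return 0;
    }

With these numbers, 1234 pending ticks' worth of cycles are consumed in 7 chunks instead of 1234 single-tick passes.
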
2248 struct timekeeper *tk = &tk_core.timekeeper;
2249 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2257 struct timekeeper *tk = &tk_core.timekeeper;
2263 *ts = tk_xtime(tk);
2270 struct timekeeper *tk = &tk_core.timekeeper;
2277 now = tk_xtime(tk);
2278 mono = tk->wall_to_monotonic;
2311 struct timekeeper *tk = &tk_core.timekeeper;
2319 base = tk->tkr_mono.base;
2320 nsecs = timekeeping_get_ns(&tk->tkr_mono);
2323 if (*cwsseq != tk->clock_was_set_seq) {
2324 *cwsseq = tk->clock_was_set_seq;
2325 *offs_real = tk->offs_real;
2326 *offs_boot = tk->offs_boot;
2327 *offs_tai = tk->offs_tai;
2331 if (unlikely(base >= tk->next_leap_ktime))
2332 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
2424 struct timekeeper *tk = &tk_core.timekeeper;
2459 orig_tai = tai = tk->tai_offset;
2463 __timekeeping_set_tai_offset(tk, tai);
2464 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2467 tk_update_leap_state(tk);