Lines matching refs: scd (struct sched_clock_data, from the Linux kernel's scheduler clock code, kernel/sched/clock.c)

111 notrace static void __scd_stamp(struct sched_clock_data *scd)
113 scd->tick_gtod = ktime_get_ns();
114 scd->tick_raw = sched_clock();
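The fragment above is the body of __scd_stamp(): it snapshots the NTP-corrected clock and the raw clock back to back, so the (tick_gtod, tick_raw) pair describes (nearly) the same instant. A minimal sketch of the helper exactly as the listing shows it; only the comments are added here:

notrace static void __scd_stamp(struct sched_clock_data *scd)
{
        scd->tick_gtod = ktime_get_ns(); /* NTP-corrected, stable time */
        scd->tick_raw  = sched_clock();  /* raw, fast, possibly drifting clock */
}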
119 struct sched_clock_data *scd;
123 * to disable IRQs in order to get a consistent scd->tick* reading.
126 scd = this_scd();
130 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
134 scd->tick_gtod, __gtod_offset,
135 scd->tick_raw, __sched_clock_offset);
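Lines 119-135 compute __sched_clock_offset: a fresh stamp is taken with IRQs off (per the quoted comment), and the offset is chosen so that the raw clock plus the offset lands on the gtod-derived value at that instant. A hedged reconstruction; the enclosing function name and the local_irq_save() pair are assumptions inferred from the quoted comment:

static void sched_clock_offset_init(void) /* hypothetical name */
{
        struct sched_clock_data *scd;
        unsigned long flags;

        /* Disable IRQs to get a consistent scd->tick* reading. */
        local_irq_save(flags);
        scd = this_scd();
        __scd_stamp(scd);
        __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
        local_irq_restore(flags);
}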
154 struct sched_clock_data *scd;
159 scd = this_scd();
160 __scd_stamp(scd);
161 scd->clock = scd->tick_gtod + __gtod_offset;
166 per_cpu(sched_clock_data, cpu) = *scd;
170 scd->tick_gtod, __gtod_offset,
171 scd->tick_raw, __sched_clock_offset);
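Lines 154-171 are the unstable-clock fallback path: the current CPU is re-stamped, its clock is rebased onto the gtod-derived value, and the result is cloned into every other CPU's sched_clock_data so all CPUs continue from one coherent timestamp. A sketch of that sequence; the function name, the preemption guards, and the for_each_possible_cpu() iterator are assumptions:

static void sched_clock_mark_unstable(void) /* hypothetical name */
{
        struct sched_clock_data *scd;
        int cpu;

        preempt_disable();
        scd = this_scd();
        __scd_stamp(scd);
        scd->clock = scd->tick_gtod + __gtod_offset; /* rebase on gtod */
        preempt_enable();

        /* Clone the stamped data to every CPU so they all restart
         * from the same coherent value. */
        for_each_possible_cpu(cpu)
                per_cpu(sched_clock_data, cpu) = *scd;
}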
199 struct sched_clock_data *scd = this_scd();
201 __scd_stamp(scd);
202 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
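Lines 199-202 solve the same equation for the other unknown: after a fresh stamp, __gtod_offset is picked so that tick_raw + __sched_clock_offset and tick_gtod + __gtod_offset name the same instant, which keeps the transition between the two clock bases continuous. The body reconstructs directly from the lines shown; only the function name is assumed:

static void sched_clock_update_gtod_offset(void) /* hypothetical name */
{
        struct sched_clock_data *scd = this_scd();

        __scd_stamp(scd);
        /* Choose __gtod_offset so both corrected clocks agree right now:
         * tick_raw + __sched_clock_offset == tick_gtod + __gtod_offset */
        __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
}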
258 * update the percpu scd from the raw @now value
263 static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
270 delta = now - scd->tick_raw;
274 old_clock = scd->clock;
277 * scd->clock = clamp(scd->tick_gtod + delta,
278 * max(scd->tick_gtod, scd->clock),
279 * scd->tick_gtod + TICK_NSEC);
282 gtod = scd->tick_gtod + __gtod_offset;
290 if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock))
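Lines 258-290 form the core of sched_clock_local(): extend the last tick stamp by the raw-clock delta, clamp the result into the window described by the quoted pseudocode (never behind the previous reading, never more than one tick past gtod), and publish it with a 64-bit compare-exchange so concurrent updaters retry instead of letting the clock move backwards. A sketch under those assumptions; mainline uses wraparound-safe max/min helpers where plain max()/min() appear below:

static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
{
        u64 now, clock, old_clock, min_clock, max_clock, gtod;
        s64 delta;

again:
        now = sched_clock();
        delta = now - scd->tick_raw;
        if (unlikely(delta < 0))
                delta = 0;              /* raw clock went backwards */

        old_clock = scd->clock;

        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */
        gtod      = scd->tick_gtod + __gtod_offset;
        clock     = gtod + delta;
        min_clock = max(gtod, old_clock);
        max_clock = max(old_clock, gtod + TICK_NSEC);

        clock = max(clock, min_clock);
        clock = min(clock, max_clock);

        /* Publish; if another context updated scd->clock meanwhile,
         * old_clock is refreshed and we retry. */
        if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock))
                goto again;

        return clock;
}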
321 static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
346 remote_clock = cmpxchg64(&scd->clock, 0, 0);
349 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
355 remote_clock = scd->clock;
365 ptr = &scd->clock;
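Lines 321-365 belong to sched_clock_remote(), which reads another CPU's clock. The odd-looking cmpxchg64(&scd->clock, 0, 0) at line 346 is the 32-bit trick: a compare-exchange against an impossible old value performs an atomic, tear-free 64-bit read, whereas on 64-bit kernels (per the comment at line 349) a plain load is already atomic. The function then writes the larger of the local and remote clocks back through whichever pointer is behind (line 365 takes &scd->clock), so cross-CPU readings stay monotonic. A compressed sketch; the exact retry structure around the final compare-exchange is an assumption:

static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
{
        struct sched_clock_data *my_scd = this_scd();
        u64 this_clock, remote_clock;
        u64 *ptr, old_val, val;

again:
        this_clock = sched_clock_local(my_scd);
#if BITS_PER_LONG != 64
        /* 32-bit: a (failing) cmpxchg64 doubles as an atomic 64-bit read. */
        remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
        /* 64-bit: the plain load is atomic versus the update. */
        remote_clock = scd->clock;
#endif

        /* Couple the two clocks: the larger value wins on both CPUs,
         * which creates monotonic movement across CPUs. */
        if ((s64)(remote_clock - this_clock) < 0) {
                ptr = &scd->clock;      /* pull the remote clock forward */
                old_val = remote_clock;
                val = this_clock;
        } else {
                ptr = &my_scd->clock;   /* pull our own clock forward */
                old_val = this_clock;
                val = remote_clock;
        }

        if (cmpxchg64(ptr, old_val, val) != old_val)
                goto again;

        return val;
}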
390 struct sched_clock_data *scd;
400 scd = cpu_sdc(cpu);
403 clock = sched_clock_remote(scd);
405 clock = sched_clock_local(scd);
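Lines 390-405 are the dispatch in sched_clock_cpu(): cpu_sdc(cpu) fetches the target CPU's sched_clock_data, and the remote path is taken only when the caller asks about a CPU other than its own. A sketch of the dispatch; the stable-clock fast path and the notrace preemption guards are assumptions based on the rest of the file:

notrace u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd;
        u64 clock;

        /* Assumed fast path: a stable raw clock needs no per-CPU state. */
        if (sched_clock_stable())
                return sched_clock() + __sched_clock_offset;

        preempt_disable_notrace();
        scd = cpu_sdc(cpu);

        if (cpu != smp_processor_id())
                clock = sched_clock_remote(scd); /* another CPU's clock */
        else
                clock = sched_clock_local(scd);  /* our own clock */

        preempt_enable_notrace();

        return clock;
}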
414 struct sched_clock_data *scd;
424 scd = this_scd();
425 __scd_stamp(scd);
426 sched_clock_local(scd);
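The last cluster (lines 414-426) is the periodic tick hook, sched_clock_tick(): it re-stamps the per-CPU data and immediately runs sched_clock_local() on the fresh stamp, which folds the new (gtod, raw) pair into scd->clock and resets the clamp window for the next tick. A sketch; the early-out for the stable-clock case is an assumption:

void sched_clock_tick(void)
{
        struct sched_clock_data *scd;

        if (sched_clock_stable())       /* assumed early-out */
                return;

        scd = this_scd();
        __scd_stamp(scd);       /* fresh gtod/raw pair */
        sched_clock_local(scd); /* fold it into scd->clock */
}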