/* SPDX-License-Identifier: GPL-2.0 */
/*
 * You SHOULD NOT be including this unless you're vsyscall
 * handling code or timekeeping internal code!
 */

#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
#define _LINUX_TIMEKEEPER_INTERNAL_H

#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>

14 /**
15 * struct tk_read_base - base structure for timekeeping readout
16 * @clock: Current clocksource used for timekeeping.
17 * @mask: Bitmask for two's complement subtraction of non 64bit clocks
18 * @cycle_last: @clock cycle value at last update
19 * @mult: (NTP adjusted) multiplier for scaled math conversion
20 * @shift: Shift value for scaled math conversion
21 * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
22 * @base: ktime_t (nanoseconds) base time for readout
23 * @base_real: Nanoseconds base value for clock REALTIME readout
24 *
25 * This struct has size 56 byte on 64 bit. Together with a seqcount it
26 * occupies a single 64byte cache line.
27 *
28 * The struct is separate from struct timekeeper as it is also used
29 * for a fast NMI safe accessors.
30 *
31 * @base_real is for the fast NMI safe accessor to allow reading clock
32 * realtime from any context.
33 */
34 struct tk_read_base {
35 struct clocksource *clock;
36 u64 mask;
37 u64 cycle_last;
38 u32 mult;
39 u32 shift;
40 u64 xtime_nsec;
41 ktime_t base;
42 u64 base_real;
43 };
44
45 /**
46 * struct timekeeper - Structure holding internal timekeeping values.
47 * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
48 * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
49 * @xtime_sec: Current CLOCK_REALTIME time in seconds
50 * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
51 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
52 * @offs_real: Offset clock monotonic -> clock realtime
53 * @offs_boot: Offset clock monotonic -> clock boottime
54 * @offs_tai: Offset clock monotonic -> clock tai
55 * @tai_offset: The current UTC to TAI offset in seconds
56 * @clock_was_set_seq: The sequence number of clock was set events
57 * @cs_was_changed_seq: The sequence number of clocksource change events
58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
60 * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
61 * @cycle_interval: Number of clock cycles in one NTP interval
62 * @xtime_interval: Number of clock shifted nano seconds in one NTP
63 * interval.
64 * @xtime_remainder: Shifted nano seconds left over when rounding
65 * @cycle_interval
66 * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
67 * @ntp_error: Difference between accumulated time and NTP time in ntp
68 * shifted nano seconds.
69 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
70 * ntp shifted nano seconds.
71 * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
72 * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
73 * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
74 *
75 * Note: For timespec(64) based interfaces wall_to_monotonic is what
76 * we need to add to xtime (or xtime corrected for sub jiffie times)
77 * to get to monotonic time. Monotonic is pegged at zero at system
78 * boot time, so wall_to_monotonic will be negative, however, we will
79 * ALWAYS keep the tv_nsec part positive so we can use the usual
80 * normalization.
81 *
82 * wall_to_monotonic is moved after resume from suspend for the
83 * monotonic time not to jump. We need to add total_sleep_time to
84 * wall_to_monotonic to get the real boot based time offset.
85 *
86 * wall_to_monotonic is no longer the boot time, getboottime must be
87 * used instead.
88 *
89 * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
90 * accelerate the VDSO update for CLOCK_BOOTTIME.
91 */
92 struct timekeeper {
93 struct tk_read_base tkr_mono;
94 struct tk_read_base tkr_raw;
95 u64 xtime_sec;
96 unsigned long ktime_sec;
97 struct timespec64 wall_to_monotonic;
98 ktime_t offs_real;
99 ktime_t offs_boot;
100 ktime_t offs_tai;
101 s32 tai_offset;
102 unsigned int clock_was_set_seq;
103 u8 cs_was_changed_seq;
104 ktime_t next_leap_ktime;
105 u64 raw_sec;
106 struct timespec64 monotonic_to_boot;
107
108 /* The following members are for timekeeping internal use */
109 u64 cycle_interval;
110 u64 xtime_interval;
111 s64 xtime_remainder;
112 u64 raw_interval;
113 /* The ntp_tick_length() value currently being used.
114 * This cached copy ensures we consistently apply the tick
115 * length for an entire tick, as ntp_tick_length may change
116 * mid-tick, and we don't want to apply that new value to
117 * the tick in progress.
118 */
119 u64 ntp_tick;
120 /* Difference between accumulated time and NTP time in ntp
121 * shifted nano seconds. */
122 s64 ntp_error;
123 u32 ntp_error_shift;
124 u32 ntp_err_mult;
125 /* Flag used to avoid updating NTP twice with same second */
126 u32 skip_second_overflow;
127 #ifdef CONFIG_DEBUG_TIMEKEEPING
128 long last_warning;
129 /*
130 * These simple flag variables are managed
131 * without locks, which is racy, but they are
132 * ok since we don't really care about being
133 * super precise about how many events were
134 * seen, just that a problem was observed.
135 */
136 int underflow_seen;
137 int overflow_seen;
138 #endif
139 };
#ifdef CONFIG_GENERIC_TIME_VSYSCALL

extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#else

/* No generic vsyscall time page on this config: the hooks are no-ops. */
static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */