/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/unistd.h>

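/*
 * VDSO_HAS_CLOCK_GETRES tells the generic vDSO library (lib/vdso) that
 * this architecture also implements clock_getres() in the vDSO.
 */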
#define VDSO_HAS_CLOCK_GETRES 1

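/*
 * Each fallback below performs the real system call via "svc #0". The
 * arm64 syscall convention puts the syscall number in x8 and the
 * arguments in x0 upwards, with the return value coming back in x0;
 * the explicit register variables pin every operand to the register the
 * kernel expects. The "memory" clobber stops the compiler from caching
 * values across the call, since the kernel writes through the user
 * pointers.
 */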
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	asm volatile(
	" svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	asm volatile(
	" svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	asm volatile(
	" svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
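/*
 * For context: these helpers are not called directly by applications.
 * The generic vDSO code in lib/vdso/gettimeofday.c tries the fast,
 * counter-based path first and only traps into the kernel when that
 * fails, roughly like this (a sketch, not the verbatim call site):
 *
 *	ret = __cvdso_clock_gettime_common(vd, clock, ts);
 *	if (unlikely(ret))
 *		return clock_gettime_fallback(clock, ts);
 *	return 0;
 */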

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	u64 res;

	/*
	 * The core code checks the mode before calling in here, so hitting
	 * VDSO_CLOCKMODE_NONE means we raced against a concurrent update.
	 * Return something; the core will do another round, see the mode
	 * change and fall back to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/*
	 * This isb() is required to prevent the counter value from being
	 * speculated, i.e. read earlier than intended.
	 */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
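	/*
	 * arch_counter_enforce_ordering() (see asm/barrier.h) creates a
	 * dummy address dependency on 'res' so that later loads cannot be
	 * reordered before the counter read.
	 */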
	arch_counter_enforce_ordering(res);

	return res;
}

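/*
 * _vdso_data names the kernel-maintained vDSO data page, which is mapped
 * read-only into every user process alongside the vDSO text.
 */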
static __always_inline
const struct vdso_data *__arch_get_vdso_data(void)
{
	return _vdso_data;
}

#ifdef CONFIG_TIME_NS
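/*
 * With time namespaces enabled, a task in a non-init time namespace sees
 * a separate data page carrying its per-namespace clock offsets; the
 * generic code retrieves that page through this hook.
 */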
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
	return _timens_data;
}
#endif

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */