/******************************************************************************
 * flushtlb.h
 *
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 *
 * Copyright (c) 2003-2004, K A Fraser
 */

#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__

#include <xen/mm.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <xen/types.h>

/* The current time as shown by the virtual TLB clock. */
extern u32 tlbflush_clock;

/* Time at which each CPU's TLB was last flushed. */
DECLARE_PER_CPU(u32, tlbflush_time);

/* TLB clock is in use. */
extern bool tlb_clk_enabled;

static inline uint32_t tlbflush_current_time(void)
{
    /* Returning 0 from tlbflush_current_time will always force a flush. */
    return tlb_clk_enabled ? tlbflush_clock : 0;
}

static inline void page_set_tlbflush_timestamp(struct page_info *page)
{
    /* Avoid the write if the TLB clock is disabled. */
    if ( !tlb_clk_enabled )
        return;

    /*
     * Prevent storing a stale time stamp, which could happen if an update
     * to tlbflush_clock plus a subsequent flush IPI happen between the
     * reading of tlbflush_clock and the writing of the struct page_info
     * field.
     */
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    page->tlbflush_timestamp = tlbflush_current_time();
    local_irq_enable();
}
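
/*
 * Illustrative sketch (assumption, not a canonical call site): a hypothetical
 * caller releasing a page whose old mappings may still be cached would
 * typically stamp the page just before handing it back, e.g.:
 *
 *     page_set_tlbflush_timestamp(pg);
 *     free_domheap_page(pg);
 *
 * The recorded timestamp can later be compared against each CPU's
 * tlbflush_time (via NEED_FLUSH()/tlbflush_filter() below) to decide which
 * CPUs still need a TLB flush before the page is reused.
 */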

/*
 * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
 * @lastuse_stamp is a timestamp taken when the PFN we are testing was last
 * used for a purpose that may have caused the CPU's TLB to become tainted.
 */
static inline bool NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
    u32 curr_time = tlbflush_current_time();
    /*
     * Two cases:
     *  1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
     *     safety during this period, we force a flush if @curr_time == 0.
     *  2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp.
     *     To detect false positives because @cpu_stamp has wrapped, we
     *     also check @curr_time. If less than @lastuse_stamp we definitely
     *     wrapped, so there's no need for a flush (one is forced every wrap).
     */
    return ((curr_time == 0) ||
            ((cpu_stamp <= lastuse_stamp) &&
             (lastuse_stamp <= curr_time)));
}
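
/*
 * Worked example (illustrative only, assuming the clock has not wrapped):
 * with cpu_stamp = 5, lastuse_stamp = 7 and curr_time = 9, the CPU last
 * flushed before the page was last used, so NEED_FLUSH() returns true.
 * With cpu_stamp = 8 instead, the CPU flushed after the last use and no
 * flush is needed.  If curr_time is 0 the clock is mid-wrap (or disabled)
 * and a flush is always requested.
 */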

/*
 * Filter the given set of CPUs, removing those that definitely flushed their
 * TLB since @page_timestamp.
 */
static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp)
{
    unsigned int cpu;

    /* Short-circuit: there's no need to iterate if the clock is disabled. */
    if ( !tlb_clk_enabled )
        return;

    for_each_cpu ( cpu, mask )
        if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) )
            __cpumask_clear_cpu(cpu, mask);
}
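
/*
 * Usage sketch (hypothetical caller, for illustration only): to flush only
 * the CPUs that might still hold stale translations for a page, one could do
 *
 *     cpumask_t pending;
 *
 *     cpumask_copy(&pending, d->dirty_cpumask);
 *     tlbflush_filter(&pending, page->tlbflush_timestamp);
 *     if ( !cpumask_empty(&pending) )
 *         flush_tlb_mask(&pending);
 *
 * Note that when the TLB clock is disabled tlbflush_filter() leaves the mask
 * untouched, so every CPU in it gets flushed.
 */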

void new_tlbflush_clock_period(void);

/* Read pagetable base. */
static inline unsigned long read_cr3(void)
{
    unsigned long cr3;
    __asm__ __volatile__ (
        "mov %%cr3, %0" : "=r" (cr3) : );
    return cr3;
}

/* Write pagetable base and implicitly tick the tlbflush clock. */
void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);

/* flush_* flag fields: */
 /*
  * Area to flush: 2^flush_order pages. Default is flush entire address space.
  * NB. Multi-page areas do not need to have been mapped with a superpage.
  */
#define FLUSH_ORDER_MASK 0xff
#define FLUSH_ORDER(x)   ((x)+1)
 /* Flush TLBs (or parts thereof) */
#define FLUSH_TLB        0x100
 /* Flush TLBs (or parts thereof) including global mappings */
#define FLUSH_TLB_GLOBAL 0x200
 /* Flush data caches */
#define FLUSH_CACHE      0x400
 /* VA for the flush has a valid mapping */
#define FLUSH_VA_VALID   0x800
 /* Flush CPU state */
#define FLUSH_VCPU_STATE 0x1000
 /* Flush the per-cpu root page table */
#define FLUSH_ROOT_PGTBL 0x2000
#ifdef CONFIG_HVM
 /* Flush all HVM guests linear TLB (using ASID/VPID) */
#define FLUSH_HVM_ASID_CORE 0x4000
#else
#define FLUSH_HVM_ASID_CORE 0
#endif
#if defined(CONFIG_PV) || defined(CONFIG_SHADOW_PAGING)
/*
 * Force an IPI to be sent. Note that adding this to the flags passed to
 * flush_area_mask will prevent using the assisted flush without having any
 * other side effect.
 */
# define FLUSH_FORCE_IPI 0x8000
#else
# define FLUSH_FORCE_IPI 0
#endif

/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);
#define flush_local(flags) flush_area_local(NULL, flags)

/* Flush specified CPUs' TLBs/caches */
void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)

/* Flush all CPUs' TLBs/caches */
#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags)
#define flush_all(flags) flush_mask(&cpu_online_map, flags)
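
/*
 * Example flag combinations (illustrative only): flushing a single, mapped
 * page on a set of CPUs might look like
 *
 *     flush_area_mask(mask, va, FLUSH_TLB | FLUSH_ORDER(0) | FLUSH_VA_VALID);
 *
 * while writing back and invalidating the data cache for a 512-page (2MB
 * with 4k pages) region locally could be requested with
 *
 *     flush_area_local(va, FLUSH_CACHE | FLUSH_ORDER(9));
 *
 * Remember that FLUSH_ORDER() stores order+1 in the flag field, so an
 * encoded value of 0 means "flush the entire address space".
 */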

/* Flush local TLBs */
#define flush_tlb_local()                       \
    flush_local(FLUSH_TLB)
#define flush_tlb_one_local(v)                  \
    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/* Flush specified CPUs' TLBs */
#define flush_tlb_mask(mask)                    \
    flush_mask(mask, FLUSH_TLB)
#define flush_tlb_one_mask(mask,v)              \
    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
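
/*
 * Illustrative use (assumption, not a canonical call site): after changing a
 * PTE that other CPUs may have cached, a caller could flush just the affected
 * linear address on the CPUs that have run the guest:
 *
 *     l1e_write(pl1e, nl1e);
 *     flush_tlb_one_mask(d->dirty_cpumask, linear);
 */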

/*
 * Make the common code TLB flush helper force use of an IPI in order to be
 * on the safe side. Note that not all calls from common code strictly require
 * this.
 */
#define arch_flush_tlb_mask(mask) flush_mask(mask, FLUSH_TLB | FLUSH_FORCE_IPI)

/* Flush all CPUs' TLBs */
#define flush_tlb_all()                         \
    flush_tlb_mask(&cpu_online_map)
#define flush_tlb_one_all(v)                    \
    flush_tlb_one_mask(&cpu_online_map, v)

#define flush_root_pgtbl_domain(d)                                       \
{                                                                        \
    if ( is_pv_domain(d) && (d)->arch.pv.xpti )                          \
        flush_mask((d)->dirty_cpumask, FLUSH_ROOT_PGTBL);                \
}

static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
static inline int invalidate_dcache_va_range(const void *p,
                                             unsigned long size)
{ return -EOPNOTSUPP; }
static inline int clean_and_invalidate_dcache_va_range(const void *p,
                                                       unsigned long size)
{
    unsigned int order = get_order_from_bytes(size);
    /* sub-page granularity support needs to be added if necessary */
    flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
    return 0;
}
static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    return clean_and_invalidate_dcache_va_range(p, size);
}
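
/*
 * Usage sketch (illustrative only, hypothetical buffer/length): a caller
 * sharing a buffer with a non-coherent agent might write the data and then
 * push the relevant lines out of the cache:
 *
 *     memcpy(buf, data, len);
 *     clean_dcache_va_range(buf, len);
 *
 * As noted in clean_and_invalidate_dcache_va_range() above, the flush is
 * performed at 2^order-page granularity; sub-page granularity is not
 * implemented.
 */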

unsigned int guest_flush_tlb_flags(const struct domain *d);
void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);

#endif /* __FLUSHTLB_H__ */