/*
 * asid.c: ASID management
 * Copyright (c) 2007, Advanced Micro Devices, Inc.
 * Copyright (c) 2009, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/param.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/percpu.h>
#include <asm/hvm/asid.h>

/* Xen command-line option to enable ASIDs */
static bool __read_mostly opt_asid_enabled = true;
boolean_param("asid", opt_asid_enabled);

/*
 * ASIDs partition the physical TLB. In the current implementation ASIDs are
 * introduced to reduce the number of TLB flushes. Each time the guest's
 * virtual address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4}
 * operation), instead of flushing the TLB, a new ASID is assigned. This
 * reduces the number of TLB flushes to at most 1/#ASIDs. The biggest
 * advantage is that hot parts of the hypervisor's code and data remain in
 * the TLB.
 *
 * Sketch of the Implementation:
 *
 * ASIDs are a CPU-local resource. As preemption of ASIDs is not possible,
 * ASIDs are assigned in a round-robin scheme. To minimize the overhead of
 * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
 * 64-bit generation. Only on a generation overflow does the code need to
 * invalidate all ASID information stored in the VCPUs that run on the
 * specific physical processor. This overflow appears after about 2^80
 * host processor cycles, so we do not optimize this case, but simply disable
 * ASID usage to retain correctness.
 */
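
/*
 * Illustrative sketch of the scheme above (hypothetical pseudo-code, not a
 * quote of the functions below; 'core' and 'fresh_asid' are made-up names):
 * on VM entry, a vCPU whose tag stems from an older generation is given a
 * fresh ASID instead of forcing a TLB flush:
 *
 *     if ( vcpu_asid->generation != core->core_asid_generation )
 *     {
 *         vcpu_asid->asid = fresh_asid(core);  // next round-robin ASID
 *         vcpu_asid->generation = core->core_asid_generation;
 *     }
 */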

/* Per-CPU ASID management. */
struct hvm_asid_data {
    uint64_t core_asid_generation; /* Current ASID generation on this core. */
    uint32_t next_asid;            /* Next ASID in the round-robin cycle. */
    uint32_t max_asid;             /* Highest ASID the hardware supports. */
    bool     disabled;             /* Is ASID use disabled on this core? */
};

static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);

void hvm_asid_init(int nasids)
{
    static int8_t g_disabled = -1;
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

    data->max_asid = nasids - 1;
    data->disabled = !opt_asid_enabled || (nasids <= 1);

    if ( g_disabled != data->disabled )
    {
        printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
        if ( g_disabled < 0 )
            g_disabled = data->disabled;
    }

    /* Zero indicates 'invalid generation', so we start the count at one. */
    data->core_asid_generation = 1;

    /* Zero indicates 'ASIDs disabled', so we start the count at one. */
    data->next_asid = 1;
}
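
/*
 * A hedged sketch of a caller: each CPU bring-up path is expected to invoke
 * hvm_asid_init() with the number of tag values its hardware supports. The
 * CPUID leaf below matches AMD SVM's reported ASID count, but treat the
 * exact source of 'nasids' as an assumption, not a quote of real setup code:
 *
 *     unsigned int nasids = cpuid_ebx(0x8000000aU);  // SVM: number of ASIDs
 *
 *     hvm_asid_init(nasids);  // nasids <= 1 disables ASID use entirely
 */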

void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
{
    write_atomic(&asid->generation, 0);
}

void hvm_asid_flush_vcpu(struct vcpu *v)
{
    hvm_asid_flush_vcpu_asid(&v->arch.hvm.n1asid);
    hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
}

void hvm_asid_flush_core(void)
{
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

    if ( data->disabled )
        return;

    if ( likely(++data->core_asid_generation != 0) )
        return;

    /*
     * ASID generations are 64 bit wide, so in practice the generation
     * counter never overflows. Should it happen anyway, we simply disable
     * ASIDs to retain correctness; the guest merely runs a bit slower.
     */
    printk("HVM: ASID generation overrun. Disabling ASIDs.\n");
    data->disabled = true;
}

bool hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
{
    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

    /*
     * On erratum #170 systems we must flush the TLB.
     * Generation overruns are handled here, too.
     */
    if ( data->disabled )
        goto disabled;

    /* Test if VCPU has valid ASID. */
    if ( read_atomic(&asid->generation) == data->core_asid_generation )
        return false;

    /* If there are no free ASIDs, need to go to a new generation. */
    if ( unlikely(data->next_asid > data->max_asid) )
    {
        hvm_asid_flush_core();
        data->next_asid = 1;
        if ( data->disabled )
            goto disabled;
    }

    /* Now guaranteed to be a free ASID. */
    asid->asid = data->next_asid++;
    write_atomic(&asid->generation, data->core_asid_generation);

    /*
     * When we assign ASID 1, flush all TLB entries as we are starting a new
     * generation, and all old ASID allocations are now stale.
     */
    return (asid->asid == 1);

 disabled:
    asid->asid = 0;
    return false;
}
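
/*
 * Usage sketch (hypothetical; the VMCB field names below are illustrative,
 * not the real layout): a VM-entry path tags the hardware with the chosen
 * ASID and performs a full TLB flush only when asked to:
 *
 *     bool need_flush = hvm_asid_handle_vmenter(&v->arch.hvm.n1asid);
 *
 *     vmcb->guest_asid = v->arch.hvm.n1asid.asid;    // 0 => ASIDs disabled
 *     if ( need_flush )
 *         vmcb->tlb_control = TLB_CTRL_FLUSH_ALL;    // illustrative constant
 */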

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */