/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86). */
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/param.h>
#include <xen/stdbool.h>
#include <asm/flushtlb.h>
#include <asm/invpcid.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include "mtrr.h"

static const struct fixed_range_block {
    uint32_t base_msr;   /* start address of an MTRR block */
    unsigned int ranges; /* number of MTRRs in this block */
} fixed_range_blocks[] = {
    { MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3) },
    { MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3) },
    { MSR_MTRRfix4K_C0000,  (0x100000 - 0xC0000) >> (12 + 3) },
    {}
};
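/*
 * Illustrative note on the 'ranges' arithmetic above: each fixed-range MSR
 * packs eight sub-ranges, hence the extra "+ 3" in the shift.  The 64K block
 * covers 0x00000-0x7FFFF, so 0x80000 >> (16 + 3) == 1 MSR; the 16K block
 * needs (0xC0000 - 0x80000) >> (14 + 3) == 2 MSRs; the 4K block needs
 * (0x100000 - 0xC0000) >> (12 + 3) == 8 MSRs -- 11 fixed-range MSRs total.
 */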

static unsigned long smp_changes_mask;
struct mtrr_state mtrr_state = {};

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
    rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
    rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
}

static void
get_fixed_ranges(mtrr_type *frs)
{
    uint64_t *p = (uint64_t *)frs;
    const struct fixed_range_block *block;

    if (!mtrr_state.have_fixed)
        return;

    for (block = fixed_range_blocks; block->ranges; ++block) {
        unsigned int i;

        for (i = 0; i < block->ranges; ++i, ++p)
            rdmsrl(block->base_msr + i, *p);
    }
}

bool is_var_mtrr_overlapped(const struct mtrr_state *m)
{
    unsigned int seg, i;
    unsigned int num_var_ranges = MASK_EXTR(m->mtrr_cap, MTRRcap_VCNT);

    for ( i = 0; i < num_var_ranges; i++ )
    {
        uint64_t base1 = m->var_ranges[i].base >> PAGE_SHIFT;
        uint64_t mask1 = m->var_ranges[i].mask >> PAGE_SHIFT;

        if ( !(m->var_ranges[i].mask & MTRR_PHYSMASK_VALID) )
            continue;

        for ( seg = i + 1; seg < num_var_ranges; seg++ )
        {
            uint64_t base2 = m->var_ranges[seg].base >> PAGE_SHIFT;
            uint64_t mask2 = m->var_ranges[seg].mask >> PAGE_SHIFT;

            if ( !(m->var_ranges[seg].mask & MTRR_PHYSMASK_VALID) )
                continue;

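            /*
             * Two valid ranges overlap iff some address matches both, i.e.
             * agrees with each base on that range's mask bits.  Such an
             * address exists exactly when the two bases agree on the bits
             * covered by *both* masks, hence the combined-mask comparison.
             */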
            if ( (base1 & mask1 & mask2) == (base2 & mask2 & mask1) )
            {
                /* MTRRs overlap. */
                return true;
            }
        }
    }

    return false;
}

void mtrr_save_fixed_ranges(void *info)
{
    get_fixed_ranges(mtrr_state.fixed_ranges);
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
    unsigned int i;
    struct mtrr_var_range *vrs;
    uint64_t msr_content;

    if (!mtrr_state.var_ranges) {
        mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
                                              num_var_ranges);
        if (!mtrr_state.var_ranges)
            return;
    }
    vrs = mtrr_state.var_ranges;

    rdmsrl(MSR_MTRRcap, msr_content);
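    /* MTRRcap bit 8 (FIX) advertises fixed-range MTRR support. */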
    mtrr_state.have_fixed = (msr_content >> 8) & 1;

    for (i = 0; i < num_var_ranges; i++)
        get_mtrr_var_range(i, &vrs[i]);
    get_fixed_ranges(mtrr_state.fixed_ranges);

    rdmsrl(MSR_MTRRdefType, msr_content);
    mtrr_state.def_type = (msr_content & 0xff);
    mtrr_state.enabled = MASK_EXTR(msr_content, MTRRdefType_E);
    mtrr_state.fixed_enabled = MASK_EXTR(msr_content, MTRRdefType_FE);

    /* Store mtrr_cap for HVM MTRR virtualisation. */
    rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap);
}

static bool_t __initdata mtrr_show;
boolean_param("mtrr.show", mtrr_show);

static const char *__init mtrr_attrib_to_str(mtrr_type x)
{
    static const char __initconst strings[MTRR_NUM_TYPES][16] =
    {
        [MTRR_TYPE_UNCACHABLE] = "uncachable",
        [MTRR_TYPE_WRCOMB]     = "write-combining",
        [MTRR_TYPE_WRTHROUGH]  = "write-through",
        [MTRR_TYPE_WRPROT]     = "write-protect",
        [MTRR_TYPE_WRBACK]     = "write-back",
    };

    return (x < ARRAY_SIZE(strings) && strings[x][0]) ? strings[x] : "?";
}

static unsigned int __initdata last_fixed_start;
static unsigned int __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(const char *level)
{
    if (!last_fixed_end)
        return;

    printk("%s %05x-%05x %s\n", level, last_fixed_start,
           last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

    last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned int base, unsigned int end,
                                     mtrr_type type)
{
    last_fixed_start = base;
    last_fixed_end = end;
    last_fixed_type = type;
}

static void __init print_fixed(unsigned int base, unsigned int step,
                               const mtrr_type *types, const char *level)
{
    unsigned int i;

    for (i = 0; i < 8; ++i, ++types, base += step) {
        if (last_fixed_end == 0) {
            update_fixed_last(base, base + step, *types);
            continue;
        }
        if (last_fixed_end == base && last_fixed_type == *types) {
            last_fixed_end = base + step;
            continue;
        }
        /* new segments: gap or different type */
        print_fixed_last(level);
        update_fixed_last(base, base + step, *types);
    }
}

static void __init print_mtrr_state(const char *level)
{
    unsigned int i;
    int width;

    printk("%sMTRR default type: %s\n", level,
           mtrr_attrib_to_str(mtrr_state.def_type));
    if (mtrr_state.have_fixed) {
        const mtrr_type *fr = mtrr_state.fixed_ranges;
        const struct fixed_range_block *block = fixed_range_blocks;
        unsigned int base = 0, step = 0x10000;

        printk("%sMTRR fixed ranges %sabled:\n", level,
               mtrr_state.fixed_enabled ? "en" : "dis");
        for (; block->ranges; ++block, step >>= 2) {
            for (i = 0; i < block->ranges; ++i, fr += 8) {
                print_fixed(base, step, fr, level);
                base += 8 * step;
            }
        }
        print_fixed_last(level);
    }
    printk("%sMTRR variable ranges %sabled:\n", level,
           mtrr_state.enabled ? "en" : "dis");
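    /* Hex digits needed to print a page frame number spanning paddr_bits. */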
    width = (paddr_bits - PAGE_SHIFT + 3) / 4;

    for (i = 0; i < num_var_ranges; ++i) {
        if (mtrr_state.var_ranges[i].mask & MTRR_PHYSMASK_VALID)
            printk("%s %u base %0*"PRIx64"000 mask %0*"PRIx64"000 %s\n",
                   level, i,
                   width, mtrr_state.var_ranges[i].base >> 12,
                   width, mtrr_state.var_ranges[i].mask >> 12,
                   mtrr_attrib_to_str(mtrr_state.var_ranges[i].base &
                                      MTRR_PHYSBASE_TYPE_MASK));
        else
            printk("%s %u disabled\n", level, i);
    }

    if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
         boot_cpu_data.x86 >= 0xf) ||
        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        uint64_t syscfg, tom2;

        rdmsrl(MSR_K8_SYSCFG, syscfg);
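        /* SYSCFG bit 21 is MtrrTom2En; bit 22 is Tom2ForceMemTypeWB. */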
        if (syscfg & (1 << 21)) {
            rdmsrl(MSR_K8_TOP_MEM2, tom2);
            printk("%sTOM2: %012"PRIx64"%s\n", level, tom2,
                   syscfg & (1 << 22) ? " (WB)" : "");
        }
    }
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
    unsigned long mask = smp_changes_mask;

    if (mtrr_show)
        print_mtrr_state(mask ? KERN_WARNING : "");
    if (!mask)
        return;
    if (mask & MTRR_CHANGE_MASK_FIXED)
        printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_VARIABLE)
        printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_DEFTYPE)
        printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
    printk(KERN_INFO "mtrr: corrected configuration.\n");
    if (!mtrr_show)
        print_mtrr_state(KERN_INFO);
}

/* Doesn't attempt to pass an error out to MTRR users, because handling
   that is complicated in some cases and probably not worth it; the best
   error handling here is simply to ignore the failure. */
static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content)
{
    if (wrmsr_safe(msr, msr_content) < 0)
        printk(KERN_ERR
               "MTRR: CPU %u: Writing MSR %x to %"PRIx64" failed\n",
               smp_processor_id(), msr, msr_content);
    /* Cache overlap status for efficient HVM MTRR virtualisation. */
    mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state);
}

/**
 * Checks and updates a fixed-range MTRR if it differs from the value it
 * should have.  If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 * \param msr      MSR address of the MTRR which should be checked and updated
 * \param changed  pointer which indicates whether the MTRR needed to be changed
 * \param msrwords pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
    uint64_t msr_content, val;

    rdmsrl(msr, msr_content);
    val = ((uint64_t)msrwords[1] << 32) | msrwords[0];

    if (msr_content != val) {
        mtrr_wrmsr(msr, val);
        *changed = true;
    }
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of a free region on success, else -ENOSPC if no free
   region is available.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = num_var_ranges;
    if (replace_reg >= 0 && replace_reg < max)
        return replace_reg;
    for (i = 0; i < max; ++i) {
        mtrr_if->get(i, &lbase, &lsize, &ltype);
        if (lsize == 0)
            return i;
    }
    return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                             unsigned long *size, mtrr_type *type)
{
    uint64_t _mask, _base;

    rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask);
    if (!(_mask & MTRR_PHYSMASK_VALID)) {
        /* Invalid (i.e. free) range */
        *base = 0;
        *size = 0;
        *type = 0;
        return;
    }

    rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base);

    /* Work out the shifted address mask. */
    _mask = size_or_mask | (_mask >> PAGE_SHIFT);

    /* This works correctly if size is a power of two, i.e. a
       contiguous range. */
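    /*
     * Illustrative example (assuming 36 physical address bits): a valid
     * 64MiB range has PHYSMASK bits 35..26 set, so after the shift and the
     * size_or_mask OR above the low 32 bits of _mask are 0xffffc000;
     * negating that gives 0x4000 pages, i.e. 64MiB.
     */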
    *size = -(uint32_t)_mask;
    *base = _base >> PAGE_SHIFT;
    *type = _base & 0xff;
}

/**
 * Checks and updates the fixed-range MTRRs if they differ from the saved set
 * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static bool set_fixed_ranges(mtrr_type *frs)
{
    unsigned long long *saved = (unsigned long long *)frs;
    bool changed = false;
    int block = -1, range;

    while (fixed_range_blocks[++block].ranges)
        for (range = 0; range < fixed_range_blocks[block].ranges; range++)
            set_fixed_range(fixed_range_blocks[block].base_msr + range,
                            &changed, (unsigned int *)saved++);

    return changed;
}

/* Set the MSR pair relating to a var range.  Returns true if
   changes are made. */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
    uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi;
    uint64_t msr_content;
    bool changed = false;

    rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content);
    lo = (uint32_t)msr_content;
    hi = (uint32_t)(msr_content >> 32);
    base_lo = (uint32_t)vr->base;
    base_hi = (uint32_t)(vr->base >> 32);

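    /* Compare only architecturally meaningful bits: PHYSBASE bits 11:8 are
       reserved, and bits above the physical address width are masked off
       via size_and_mask. */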
    lo &= 0xfffff0ffUL;
    base_lo &= 0xfffff0ffUL;
    hi &= size_and_mask >> (32 - PAGE_SHIFT);
    base_hi &= size_and_mask >> (32 - PAGE_SHIFT);

    if ((base_lo != lo) || (base_hi != hi)) {
        mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base);
        changed = true;
    }

    rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content);
    lo = (uint32_t)msr_content;
    hi = (uint32_t)(msr_content >> 32);
    mask_lo = (uint32_t)vr->mask;
    mask_hi = (uint32_t)(vr->mask >> 32);

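    /* PHYSMASK bits 10:0 are reserved and bit 11 is the valid bit, so the
       comparison below keeps bits 11 and up only. */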
    lo &= 0xfffff800UL;
    mask_lo &= 0xfffff800UL;
    hi &= size_and_mask >> (32 - PAGE_SHIFT);
    mask_hi &= size_and_mask >> (32 - PAGE_SHIFT);

    if ((mask_lo != lo) || (mask_hi != hi)) {
        mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask);
        changed = true;
    }
    return changed;
}

static uint64_t deftype;

static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU from the global mtrr_state.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes were made, else a mask indicating what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < num_var_ranges; i++)
        if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
            change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
        change_mask |= MTRR_CHANGE_MASK_FIXED;

    /* Set_mtrr_restore restores the old value of MTRRdefType,
       so to set it we fiddle with the saved value */
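    /* 0xcff covers the default type field (bits 7:0), FE (bit 10) and E
       (bit 11) of MTRRdefType. */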
    if ((deftype & 0xff) != mtrr_state.def_type
        || MASK_EXTR(deftype, MTRRdefType_E) != mtrr_state.enabled
        || MASK_EXTR(deftype, MTRRdefType_FE) != mtrr_state.fixed_enabled) {
        deftype = (deftype & ~0xcff) | mtrr_state.def_type |
                  MASK_INSR(mtrr_state.enabled, MTRRdefType_E) |
                  MASK_INSR(mtrr_state.fixed_enabled, MTRRdefType_FE);
        change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}


static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain.  The caller
 * must ensure that local interrupts are disabled and are re-enabled after
 * post_set() has been called.
 */

static bool prepare_set(void)
{
    unsigned long cr4;

    /* Note that this is not ideal, since the cache is only flushed/disabled
       for this CPU while the MTRRs are changed, but changing this requires
       more invasive changes to the way the kernel boots. */

    spin_lock(&set_atomicity_lock);

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    write_cr0(read_cr0() | X86_CR0_CD);

    /*
     * Cache flushing is the most time-consuming step when programming
     * the MTRRs.  Fortunately, as per the Intel Software Developer's
     * Manual, we can skip it if the processor supports cache self-
     * snooping.
     */
    alternative("wbinvd", "", X86_FEATURE_XEN_SELFSNOOP);

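    /* Flush the TLB as well: toggling CR4.PGE or issuing INVPCID flushes
       all translations (including global ones); a plain CR3 write suffices
       when global pages are not in use. */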
    cr4 = read_cr4();
    if (cr4 & X86_CR4_PGE)
        write_cr4(cr4 & ~X86_CR4_PGE);
    else if (use_invpcid)
        invpcid_flush_all();
    else
        write_cr3(read_cr3());

    /* Save MTRR state */
    rdmsrl(MSR_MTRRdefType, deftype);

    /* Disable MTRRs, and set the default type to uncached */
    mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);

    /* Again, only flush caches if we have to. */
    alternative("wbinvd", "", X86_FEATURE_XEN_SELFSNOOP);

    return cr4 & X86_CR4_PGE;
}

static void post_set(bool pge)
{
    /* Intel (P6) standard MTRRs */
    mtrr_wrmsr(MSR_MTRRdefType, deftype);

    /* Enable caches */
    write_cr0(read_cr0() & ~X86_CR0_CD);

    /* Reenable CR4.PGE (also flushes the TLB) */
    if (pge)
        write_cr4(read_cr4() | X86_CR4_PGE);
    else if (use_invpcid)
        invpcid_flush_all();
    else
        write_cr3(read_cr3());

    spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
    unsigned long mask, count;
    unsigned long flags;
    bool pge;

    local_irq_save(flags);
    pge = prepare_set();

    /* Actually set the state */
    mask = set_mtrr_state();

    post_set(pge);
    local_irq_restore(flags);

    /* Use the atomic bitops to update the global mask */
    for (count = 0; count < sizeof mask * 8; ++count) {
        if (mask & 0x01)
            set_bit(count, &smp_changes_mask);
        mask >>= 1;
    }
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
/* [SUMMARY] Set a variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region.  If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
    unsigned long flags;
    struct mtrr_var_range *vr;
    bool pge;

    vr = &mtrr_state.var_ranges[reg];

    local_irq_save(flags);
    pge = prepare_set();

    if (size == 0) {
        /* The invalid bit is kept in the mask, so we simply clear the
           relevant mask register to disable a range. */
        memset(vr, 0, sizeof(*vr));
        mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0);
    } else {
        uint32_t base_lo, base_hi, mask_lo, mask_hi;

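        /*
         * Illustrative example: size = 0x4000 pages (64MiB) gives
         * mask_lo = (-0x4000 << 12) | valid = 0xfc000800, i.e. PHYSMASK
         * bits 31..26 plus the valid bit, matching any address whose
         * bits 31..26 equal those of the base.
         */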
        base_lo = base << PAGE_SHIFT | type;
        base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
        mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID;
        mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
        vr->base = ((uint64_t)base_hi << 32) | base_lo;
        vr->mask = ((uint64_t)mask_hi << 32) | mask_lo;

        mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base);
        mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
    }

    post_set(pge);
    local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
    unsigned long lbase, last;

    /* For Intel PPro stepping <= 7, must be 4 MiB aligned
       and not touch 0x70000000->0x7003FFFF */
    if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
        boot_cpu_data.x86_model == 1 &&
        boot_cpu_data.x86_mask <= 7) {
        if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
            printk(KERN_WARNING "mtrr: base(%#lx000) is not 4 MiB aligned\n", base);
            return -EINVAL;
        }
        if (!(base + size < 0x70000 || base > 0x7003F) &&
            (type == MTRR_TYPE_WRCOMB
             || type == MTRR_TYPE_WRBACK)) {
            printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
            return -EINVAL;
        }
    }

    /* Check upper bits of base and last are equal and lower bits are 0
       for base and 1 for last */
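    /* In other words, base..base+size-1 must be a size-aligned power-of-two
       block.  E.g. base 0x40000 pages with size 0x40000 pages (1GiB at 1GiB)
       passes, while the same size at base 0x20000 pages (512MiB) fails. */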
    last = base + size - 1;
    for (lbase = base; !(lbase & 1) && (last & 1);
         lbase = lbase >> 1, last = last >> 1) ;
    if (lbase != last) {
        printk(KERN_WARNING "mtrr: base(%#lx000) is not aligned on a size(%#lx000) boundary\n",
               base, size);
        return -EINVAL;
    }
    return 0;
}


static int generic_have_wrcomb(void)
{
    unsigned long config;
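    /* MTRRcap bit 10 (WC) indicates write-combining support. */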
    rdmsrl(MSR_MTRRcap, config);
    return (config & (1ULL << 10));
}

/* Generic (Intel-compatible) MTRR op table. */
const struct mtrr_ops generic_mtrr_ops = {
    .use_intel_if      = true,
    .set_all           = generic_set_all,
    .get               = generic_get_mtrr,
    .get_free_region   = generic_get_free_region,
    .set               = generic_set_mtrr,
    .validate_add_page = generic_validate_add_page,
    .have_wrcomb       = generic_have_wrcomb,
};