1 /******************************************************************************
2 * Original code extracted from arch/x86/x86_64/mm.c
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; If not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include <xen/init.h>
19 #include <xen/mm.h>
20 #include <xen/bitops.h>
21 #include <xen/nospec.h>
22
/* Parameters for PFN/MADDR compression. */
unsigned long __read_mostly max_pdx;
/* PFN bits below the compressed hole; passed through unmodified. */
unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
/* Machine-address counterpart of pfn_pdx_bottom_mask (PAGE_SHIFT wider). */
unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
/* PFN bits above the hole, i.e. those shifted down during compression. */
unsigned long __read_mostly pfn_top_mask = 0;
/* Machine-address counterpart of pfn_top_mask. */
unsigned long __read_mostly ma_top_mask = 0;
/* PFN bits squeezed out by compression; must be clear in any valid MFN. */
unsigned long __read_mostly pfn_hole_mask = 0;
/* Width, in bits, of the hole removed by compression (0 = no compression). */
unsigned int __read_mostly pfn_pdx_hole_shift = 0;

/*
 * One bit per PDX_GROUP_COUNT-sized chunk of the frame table, set when the
 * chunk covers at least one valid page (see set_pdx_range()).  Group 0 is
 * pre-marked valid.
 */
unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
34
/*
 * Check whether an MFN denotes a valid, frame-table-covered page: it must
 * lie below max_page, must not have any bits inside the PDX compression
 * hole set, and its PDX group must be marked valid in pdx_group_valid.
 */
bool __mfn_valid(unsigned long mfn)
{
    /*
     * evaluate_nospec() hardens the bounds check against speculative
     * out-of-range use of the MFN; check it first, before any array access.
     */
    if ( unlikely(evaluate_nospec(mfn >= max_page)) )
        return false;
    /* An MFN with hole bits set cannot correspond to any valid PDX. */
    return likely(!(mfn & pfn_hole_mask)) &&
           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
                           pdx_group_valid));
}
43
44 /* Sets all bits from the most-significant 1-bit down to the LSB */
fill_mask(u64 mask)45 static u64 __init fill_mask(u64 mask)
46 {
47 while (mask & (mask + 1))
48 mask |= mask + 1;
49
50 return mask;
51 }
52
53 /* We don't want to compress the low MAX_ORDER bits of the addresses. */
pdx_init_mask(uint64_t base_addr)54 uint64_t __init pdx_init_mask(uint64_t base_addr)
55 {
56 return fill_mask(max(base_addr,
57 (uint64_t)1 << (MAX_ORDER + PAGE_SHIFT)) - 1);
58 }
59
pdx_region_mask(u64 base,u64 len)60 u64 __init pdx_region_mask(u64 base, u64 len)
61 {
62 return fill_mask(base ^ (base + len - 1));
63 }
64
set_pdx_range(unsigned long smfn,unsigned long emfn)65 void set_pdx_range(unsigned long smfn, unsigned long emfn)
66 {
67 unsigned long idx, eidx;
68
69 idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
70 eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
71
72 for ( ; idx < eidx; ++idx )
73 __set_bit(idx, pdx_group_valid);
74 }
75
/*
 * Find the widest run of zero bits (the "hole") in "mask" above the low
 * MAX_ORDER bits, and program the global PFN <-> PDX translation masks so
 * that this hole is squeezed out of PFNs and machine addresses.  A set bit
 * in "mask" marks a bit position actually used by some RAM region.
 */
void __init pfn_pdx_hole_setup(unsigned long mask)
{
    unsigned int i, j, bottom_shift = 0, hole_shift = 0;

    /*
     * We skip the first MAX_ORDER bits, as we never want to compress them.
     * This guarantees that page-pointer arithmetic remains valid within
     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
     * buddy allocator relies on this assumption.
     *
     * If the logic changes here, we might have to update the ARM specific
     * init_pdx too.
     */
    for ( j = MAX_ORDER-1; ; )
    {
        /* i: first zero bit at or above position j + 1 (candidate hole start). */
        i = find_next_zero_bit(&mask, BITS_PER_LONG, j + 1);
        if ( i >= BITS_PER_LONG )
            break;
        /* j: first set bit above i; [i, j) is a candidate hole. */
        j = find_next_bit(&mask, BITS_PER_LONG, i + 1);
        if ( j >= BITS_PER_LONG )
            /* Zero run extends to the top bit; nothing above it to shift down. */
            break;
        /* Keep only the widest hole seen so far. */
        if ( j - i > hole_shift )
        {
            hole_shift = j - i;
            bottom_shift = i;
        }
    }
    /* No hole found: leave the identity-mapping defaults in place. */
    if ( !hole_shift )
        return;

    printk(KERN_INFO "PFN compression on bits %u...%u\n",
           bottom_shift, bottom_shift + hole_shift - 1);

    /* Bits [bottom_shift, bottom_shift + hole_shift) are removed from PFNs. */
    pfn_pdx_hole_shift  = hole_shift;
    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
}
116
117
118 /*
119 * Local variables:
120 * mode: C
121 * c-file-style: "BSD"
122 * c-basic-offset: 4
123 * indent-tabs-mode: nil
124 * End:
125 */
126