#include "private.h"

#include <xen/lib/x86/msr.h>

/*
 * Copy a single MSR into the provided msr_entry_buffer_t buffer, performing a
 * boundary check against the buffer size.
 */
static int copy_msr_to_buffer(uint32_t idx, uint64_t val,
                              msr_entry_buffer_t msrs,
                              uint32_t *curr_entry, const uint32_t nr_entries)
{
    const xen_msr_entry_t ent = { .idx = idx, .val = val };

    if ( *curr_entry == nr_entries )
        return -ENOBUFS;

    if ( copy_to_buffer_offset(msrs, *curr_entry, &ent, 1) )
        return -EFAULT;

    ++*curr_entry;

    return 0;
}

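/*
 * Serialise the MSR state of a policy object into a buffer.  On entry,
 * @nr_entries_p is the capacity of @msrs; on success it is updated to the
 * number of entries actually written.  Fails with -ENOBUFS if the buffer is
 * too short, or -EFAULT if it can't be written to.
 */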
int x86_msr_copy_to_buffer(const struct msr_policy *p,
                           msr_entry_buffer_t msrs, uint32_t *nr_entries_p)
{
    const uint32_t nr_entries = *nr_entries_p;
    uint32_t curr_entry = 0;

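/*
 * Serialise one MSR, bailing out of the enclosing function on failure.  The
 * GNU statement expression lets the 'return' propagate the error code from
 * copy_msr_to_buffer() directly out of x86_msr_copy_to_buffer().
 */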
#define COPY_MSR(idx, val)                                      \
    ({                                                          \
        int ret;                                                \
                                                                \
        if ( (ret = copy_msr_to_buffer(                         \
                  idx, val, msrs, &curr_entry, nr_entries)) )   \
            return ret;                                         \
    })

    COPY_MSR(MSR_INTEL_PLATFORM_INFO, p->platform_info.raw);
    COPY_MSR(MSR_ARCH_CAPABILITIES,   p->arch_caps.raw);

#undef COPY_MSR

    *nr_entries_p = curr_entry;

    return 0;
}

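/*
 * Deserialise a buffer of up to MSR_MAX_SERIALISED_ENTRIES MSRs back into a
 * policy object.  On failure, and if @err_msr is non-NULL, the index of the
 * first offending MSR entry is reported through @err_msr.
 */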
int x86_msr_copy_from_buffer(struct msr_policy *p,
                             const msr_entry_buffer_t msrs, uint32_t nr_entries,
                             uint32_t *err_msr)
{
    unsigned int i;
    xen_msr_entry_t data;
    int rc;

    if ( err_msr )
        *err_msr = -1; /* ~0 => no specific MSR identified (yet). */

    /*
     * A well-formed caller is expected to pass an array with entries in
     * order, and without any repetitions.  However, due to per-vendor
     * differences, and in the case of upgrade or levelled scenarios, we
     * typically expect fewer than MAX entries to be passed.
     *
     * Detecting repeated entries is prohibitively complicated, so we don't
     * bother.  That said, one way or another if more than MAX entries are
     * passed, something is wrong.
     */
    if ( nr_entries > MSR_MAX_SERIALISED_ENTRIES )
        return -E2BIG;

    for ( i = 0; i < nr_entries; i++ )
    {
        if ( copy_from_buffer_offset(&data, msrs, i, 1) )
            return -EFAULT;

        if ( data.flags ) /* .flags MBZ */
        {
            rc = -EINVAL;
            goto err;
        }

        switch ( data.idx )
        {
            /*
             * Assign data.val to p->field, checking for truncation if the
             * backing storage for field is smaller than uint64_t.  The
             * cast-and-compare rejects any value which doesn't survive the
             * round trip through the narrower type.
             */
#define ASSIGN(field)                                           \
            ({                                                  \
                if ( (typeof(p->field))data.val != data.val )   \
                {                                               \
                    rc = -EOVERFLOW;                            \
                    goto err;                                   \
                }                                               \
                p->field = data.val;                            \
            })

        case MSR_INTEL_PLATFORM_INFO: ASSIGN(platform_info.raw); break;
        case MSR_ARCH_CAPABILITIES:   ASSIGN(arch_caps.raw);     break;

#undef ASSIGN

        default:
            rc = -ERANGE;
            goto err;
        }
    }

    return 0;

 err:
    if ( err_msr )
        *err_msr = data.idx;

    return rc;
}

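/*
 * Illustrative sketch only, not part of this file: a round trip through the
 * two functions above, assuming a build where msr_entry_buffer_t is a plain
 * xen_msr_entry_t pointer (e.g. userspace).  'policy', 'new_policy' and
 * handle_bad_msr() are hypothetical names.
 *
 *     xen_msr_entry_t buf[MSR_MAX_SERIALISED_ENTRIES];
 *     uint32_t nr = MSR_MAX_SERIALISED_ENTRIES, err_msr;
 *     int rc;
 *
 *     rc = x86_msr_copy_to_buffer(&policy, buf, &nr);
 *     if ( rc )
 *         return rc;
 *
 *     rc = x86_msr_copy_from_buffer(&new_policy, buf, nr, &err_msr);
 *     if ( rc )
 *         handle_bad_msr(err_msr, rc);
 */
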
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */