// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/rculist.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/mm.h>
#include "nfit_test.h"

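/*
 * A single iomap_ops instance holds the lookup and DSM-evaluation
 * callbacks registered by the nfit_test module.  It is published on
 * iomap_head under RCU so the wrappers below can consult it without
 * taking a lock.
 */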
static LIST_HEAD(iomap_head);

static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;
	nfit_test_evaluate_dsm_fn evaluate_dsm;
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};

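/* Register the harness callbacks; interception becomes active here. */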
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);

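/*
 * Unpublish the callbacks and wait out any RCU readers before the
 * nfit_test module (and its callbacks) can go away.
 */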
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);

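/*
 * Look up an emulated resource by physical address.  Returns NULL when
 * no harness is registered or the address is not covered by a test
 * resource, in which case callers fall back to the real kernel API.
 */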
static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
{
	struct iomap_ops *ops;

	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		return ops->nfit_test_lookup(resource);
	return NULL;
}

struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
	struct nfit_test_resource *res;

	rcu_read_lock();
	res = __get_nfit_res(resource);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(get_nfit_res);

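/*
 * Common ioremap interception: emulated ranges resolve to an offset
 * into the backing test buffer, everything else is passed to the given
 * real ioremap variant.
 */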
static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
		void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return (void __iomem *) nfit_res->buf + offset
			- nfit_res->res.start;
	return fallback_fn(offset, size);
}

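/*
 * The devm remap wrappers follow the same pattern: hand back the
 * backing test buffer for emulated ranges, otherwise defer to the real
 * devm_ioremap() / devm_memremap().
 */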
void __iomem *__wrap_devm_ioremap(struct device *dev,
		resource_size_t offset, unsigned long size)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return (void __iomem *) nfit_res->buf + offset
			- nfit_res->res.start;
	return devm_ioremap(dev, offset, size);
}
EXPORT_SYMBOL(__wrap_devm_ioremap);

void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return devm_memremap(dev, offset, size, flags);
}
EXPORT_SYMBOL(__wrap_devm_memremap);

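/*
 * Devres teardown for the emulated devm_memremap_pages() path: kill
 * the page reference and wait for it to drain, using the caller's
 * pgmap ops when they were supplied.
 */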
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	WARN_ON(!pgmap || !pgmap->ref);

	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);

	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

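/*
 * Emulated devm_memremap_pages(): mirror the reference-counting rules
 * of the real implementation (either take the internal ref, or require
 * a complete kill/cleanup pair), but return the test buffer instead of
 * remapping device pages.
 */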
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->range.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);

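/*
 * Emulated ranges are backed by ordinary kernel memory rather than a
 * real device mapping, so strip PFN_MAP when building a pfn_t for
 * them.
 */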
pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(addr);

	if (nfit_res)
		flags &= ~PFN_MAP;
	return phys_to_pfn_t(addr, flags);
}
EXPORT_SYMBOL(__wrap_phys_to_pfn_t);

void *__wrap_memremap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return memremap(offset, size, flags);
}
EXPORT_SYMBOL(__wrap_memremap);

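/*
 * The unmap wrappers (devm_memunmap, iounmap, memunmap below) are
 * no-ops for emulated addresses: the backing buffer is owned and freed
 * by the nfit_test module itself.
 */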
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);

void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);

void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);

void __wrap_iounmap(volatile void __iomem *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return iounmap(addr);
}
EXPORT_SYMBOL(__wrap_iounmap);

void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);

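/*
 * Region request/release emulation.  Requests against iomem_resource
 * that fall inside an emulated range are tracked on the per-resource
 * 'requests' list under nfit_res->lock, instead of being inserted into
 * the real resource tree.
 */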
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}

static int match(struct device *dev, void *__res, void *match_data)
{
	struct resource *res = *((struct resource **) __res);
	resource_size_t start = *((resource_size_t *) match_data);

	return res->start == start;
}

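/*
 * Release an emulated region.  With a device, drop the devres entry
 * (which re-enters here without a device); otherwise unlink the
 * matching request and sanity check its size.  Returns false when the
 * range is not emulated, so the caller can use the real release path.
 */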
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
					__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}

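/*
 * Request an emulated region: bounds check against the backing test
 * resource, reject a start address that is already busy, then track
 * the new request.  With a device the request is also registered with
 * devres so it is released automatically on driver detach.
 */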
static struct resource *nfit_test_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name, int flags)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (start + n > nfit_res->res.start
					+ resource_size(&nfit_res->res)) {
				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
						__func__, start, n,
						&nfit_res->res);
				return NULL;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (start == req->res.start) {
					res = &req->res;
					break;
				}
			spin_unlock(&nfit_res->lock);

			if (res) {
				WARN(1, "%pr already busy\n", res);
				return NULL;
			}

			req = kzalloc(sizeof(*req), GFP_KERNEL);
			if (!req)
				return NULL;
			INIT_LIST_HEAD(&req->list);
			res = &req->res;

			res->start = start;
			res->end = start + n - 1;
			res->name = name;
			res->flags = resource_type(parent);
			res->flags |= IORESOURCE_BUSY | flags;
			spin_lock(&nfit_res->lock);
			list_add(&req->list, &nfit_res->requests);
			spin_unlock(&nfit_res->lock);

			if (dev) {
				struct resource **d;

				d = devres_alloc(nfit_devres_release,
						sizeof(struct resource *),
						GFP_KERNEL);
				if (!d)
					return NULL;
				*d = res;
				devres_add(dev, d);
			}

			pr_debug("%s: %pr\n", __func__, res);
			return res;
		}
	}
	if (dev)
		return __devm_request_region(dev, parent, start, n, name);
	return __request_region(parent, start, n, name, flags);
}

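/*
 * Exported wrappers: route the region and resource APIs through the
 * emulation above, falling through to the real implementations for
 * non-emulated ranges.
 */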
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);

int __wrap_insert_resource(struct resource *parent, struct resource *res)
{
	if (get_nfit_res(res->start))
		return 0;
	return insert_resource(parent, res);
}
EXPORT_SYMBOL(__wrap_insert_resource);

int __wrap_remove_resource(struct resource *res)
{
	if (get_nfit_res(res->start))
		return 0;
	return remove_resource(res);
}
EXPORT_SYMBOL(__wrap_remove_resource);

struct resource *__wrap___devm_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name)
{
	if (!dev)
		return NULL;
	return nfit_test_request_region(dev, parent, start, n, name, 0);
}
EXPORT_SYMBOL(__wrap___devm_request_region);

void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);

void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);

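/*
 * ACPI interception: serve _FIT evaluations on emulated handles from
 * the test buffer, and route _DSM calls to the registered evaluate_dsm
 * callback.  Anything else, or an error from the callback, falls back
 * to the real ACPI interpreter.
 */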
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);

union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);

MODULE_LICENSE("GPL v2");