// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <pmem.h>
#include <nd.h>

/*
 * Translate a page offset within the pmem device to a kernel virtual
 * address and pfn for dax, honoring the nfit_test override.
 *
 * Returns the number of contiguous pages available for dax starting at
 * @pgoff, or -EIO if the requested range overlaps a known bad block.
 */
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t off = PFN_PHYS(pgoff) + pmem->data_offset;
	void *vaddr;

	/* Refuse the whole request if it touches a known bad block. */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/* Both the nfit_test and real-resource cases hand back virt_addr. */
	vaddr = pmem->virt_addr + off;
	if (kaddr)
		*kaddr = vaddr;

	/*
	 * Limit dax to a single page at a time given vmalloc()-backed
	 * in the nfit_test case.
	 */
	if (get_nfit_res(pmem->phys_addr + off)) {
		struct page *page = vmalloc_to_page(vaddr);

		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));
		return 1;
	}

	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + off, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - off);
}