#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>
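/*
 * Resolve a page offset within the pmem device to a kernel virtual
 * address and pfn. Ranges owned by nfit_test are vmalloc()-backed, so
 * they are translated via vmalloc_to_page() and limited to a single
 * page per call.
 */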
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
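	/*
	 * Limit DAX to a single page at a time here: in the nfit_test case
	 * the simulated pmem range is vmalloc()-backed, so each page has to
	 * be looked up individually via vmalloc_to_page().
	 */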
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
	}

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
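	/*
	 * If badblocks are present, limit the known-good range reported to
	 * the caller to just the requested range.
	 */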
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}