/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"
#include "i915_mm.h"

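/*
 * Descriptive note (added for this listing): state threaded through the
 * per-PTE callbacks below. remap_io_mapping() walks a linear pfn range
 * starting at @pfn, while remap_io_sg() walks the sg-table iterator @sgt,
 * optionally offsetting each DMA address by @iobase.
 */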
struct remap_pfn {
    struct mm_struct *mm;
    unsigned long pfn;
    pgprot_t prot;

    struct sgt_iter sgt;
    resource_size_t iobase;
};

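/* An iobase of -1 means "no DMA address: map by pfn from the sg page". */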
#define use_dma(io) ((io) != -1)

static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
    if (use_dma(r->iobase))
        /* pfn of the bus address at the current sg position */
        return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
    else
        /* pfn of the backing page, advanced in whole pages */
        return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
    struct remap_pfn *r = data;

    if (GEM_WARN_ON(!r->sgt.sgp))
        return -EINVAL;

    /* Special PTEs are not associated with any struct page */
    set_pte_at(r->mm, addr, pte,
           pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
    r->pfn++; /* track insertions in case we need to unwind later */

    r->sgt.curr += PAGE_SIZE;
    if (r->sgt.curr >= r->sgt.max)
        r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

    return 0;
}

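/*
 * Descriptive note (added for this listing): every vma we remap into must
 * already be a pure pfn map with no struct pages behind it (VM_PFNMAP),
 * unable to grow via mremap() (VM_DONTEXPAND) and excluded from core dumps
 * (VM_DONTDUMP).
 */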
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

#if IS_ENABLED(CONFIG_X86)
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
    struct remap_pfn *r = data;

    /* Special PTEs are not associated with any struct page */
    set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
    r->pfn++;

    return 0;
}

/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: first physical page frame number to map
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
             unsigned long addr, unsigned long pfn, unsigned long size,
             struct io_mapping *iomap)
{
    struct remap_pfn r;
    int err;

    GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

    /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
    r.mm = vma->vm_mm;
    r.pfn = pfn;
    /* Take the caching attributes from the io_mapping, the rest from the vma */
    r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
              (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

    err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
    if (unlikely(err)) {
        /* Unwind only the ptes we inserted before the failure */
        zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
        return err;
    }

    return 0;
}
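
/*
 * Illustrative sketch (not part of this file): a driver's fault handler
 * might call remap_io_mapping() roughly like this, with mmap_lock already
 * held by the fault path. The names example_fault(), ggtt_pfn and
 * ggtt_iomap are hypothetical placeholders, not i915 symbols.
 *
 *	static vm_fault_t example_fault(struct vm_fault *vmf)
 *	{
 *		struct vm_area_struct *area = vmf->vma;
 *		int err;
 *
 *		// Map the whole vma onto a linear range of IO pages,
 *		// starting at the (hypothetical) pfn ggtt_pfn.
 *		err = remap_io_mapping(area, area->vm_start, ggtt_pfn,
 *				       area->vm_end - area->vm_start,
 *				       ggtt_iomap);
 *		return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 *	}
 */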
#endif

/**
 * remap_io_sg - remap an IO-mapped scatterlist to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: start sg entry
 * @iobase: offset added to each sg DMA address, or -1 to map by pfn instead
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
        unsigned long addr, unsigned long size,
        struct scatterlist *sgl, resource_size_t iobase)
{
    struct remap_pfn r = {
        .mm = vma->vm_mm,
        .prot = vma->vm_page_prot,
        .sgt = __sgt_iter(sgl, use_dma(iobase)),
        .iobase = iobase,
    };
    int err;

    /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
    GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

    if (!use_dma(iobase))
        flush_cache_range(vma, addr, size);

    err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
    if (unlikely(err)) {
        /* r.pfn counts the ptes inserted before the failure */
        zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
        return err;
    }

    return 0;
}
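
/*
 * Illustrative sketch (not part of this file): a CPU-mmap path might map an
 * object's backing store with remap_io_sg() roughly like this. The names
 * example_mmap_pages(), obj_sg and obj_iobase are hypothetical placeholders;
 * pass -1 as obj_iobase to map by page pfn rather than by DMA address.
 *
 *	static int example_mmap_pages(struct vm_area_struct *area,
 *				      struct scatterlist *obj_sg,
 *				      resource_size_t obj_iobase)
 *	{
 *		return remap_io_sg(area, area->vm_start,
 *				   area->vm_end - area->vm_start,
 *				   obj_sg, obj_iobase);
 *	}
 */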