0001
0002
0003
0004
0005
0006 #ifndef _LINUX_IO_MAPPING_H
0007 #define _LINUX_IO_MAPPING_H
0008
0009 #include <linux/types.h>
0010 #include <linux/slab.h>
0011 #include <linux/bug.h>
0012 #include <linux/io.h>
0013 #include <linux/pgtable.h>
0014 #include <asm/page.h>
0015
0016
0017
0018
0019
0020
0021
0022
/*
 * Describes a write-combining (WC) mapping of a physical I/O resource.
 * Filled in by io_mapping_init_wc()/io_mapping_create_wc().
 */
struct io_mapping {
	resource_size_t base;	/* physical start of the mapped resource */
	unsigned long size;	/* length of the resource in bytes */
	pgprot_t prot;		/* page protection used for WC mappings */
	/*
	 * Kernel virtual base of the whole resource; only set/used in the
	 * !CONFIG_HAVE_ATOMIC_IOMAP fallback, where the full range is
	 * ioremap_wc()'d up front.
	 */
	void __iomem *iomem;
};
0029
0030 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
0031
0032 #include <linux/pfn.h>
0033 #include <asm/iomap.h>
0034
0035
0036
0037
0038
0039
0040
0041 static inline struct io_mapping *
0042 io_mapping_init_wc(struct io_mapping *iomap,
0043 resource_size_t base,
0044 unsigned long size)
0045 {
0046 pgprot_t prot;
0047
0048 if (iomap_create_wc(base, size, &prot))
0049 return NULL;
0050
0051 iomap->base = base;
0052 iomap->size = size;
0053 iomap->prot = prot;
0054 return iomap;
0055 }
0056
/*
 * io_mapping_fini - tear down a mapping set up by io_mapping_init_wc()
 * @mapping: descriptor previously initialized by io_mapping_init_wc()
 *
 * Releases the arch-level WC resources; does not free @mapping itself.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
0062
0063
0064 static inline void __iomem *
0065 io_mapping_map_atomic_wc(struct io_mapping *mapping,
0066 unsigned long offset)
0067 {
0068 resource_size_t phys_addr;
0069
0070 BUG_ON(offset >= mapping->size);
0071 phys_addr = mapping->base + offset;
0072 preempt_disable();
0073 pagefault_disable();
0074 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
0075 }
0076
/*
 * io_mapping_unmap_atomic - undo io_mapping_map_atomic_wc()
 * @vaddr: address returned by io_mapping_map_atomic_wc()
 *
 * Teardown is the exact reverse of the map path: release the mapping
 * first, then re-enable page faults and preemption.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	preempt_enable();
}
0084
0085 static inline void __iomem *
0086 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
0087 {
0088 resource_size_t phys_addr;
0089
0090 BUG_ON(offset >= mapping->size);
0091 phys_addr = mapping->base + offset;
0092 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
0093 }
0094
/*
 * io_mapping_unmap_local - undo io_mapping_map_local_wc()
 * @vaddr: address returned by io_mapping_map_local_wc()
 */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
0099
0100 static inline void __iomem *
0101 io_mapping_map_wc(struct io_mapping *mapping,
0102 unsigned long offset,
0103 unsigned long size)
0104 {
0105 resource_size_t phys_addr;
0106
0107 BUG_ON(offset >= mapping->size);
0108 phys_addr = mapping->base + offset;
0109
0110 return ioremap_wc(phys_addr, size);
0111 }
0112
/*
 * io_mapping_unmap - release a mapping created by io_mapping_map_wc()
 * @vaddr: address returned by io_mapping_map_wc()
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
0118
0119 #else
0120
0121 #include <linux/uaccess.h>
0122
0123
0124 static inline struct io_mapping *
0125 io_mapping_init_wc(struct io_mapping *iomap,
0126 resource_size_t base,
0127 unsigned long size)
0128 {
0129 iomap->iomem = ioremap_wc(base, size);
0130 if (!iomap->iomem)
0131 return NULL;
0132
0133 iomap->base = base;
0134 iomap->size = size;
0135 iomap->prot = pgprot_writecombine(PAGE_KERNEL);
0136
0137 return iomap;
0138 }
0139
/*
 * io_mapping_fini - tear down a mapping set up by io_mapping_init_wc()
 * @mapping: descriptor previously initialized by io_mapping_init_wc()
 *
 * Unmaps the single up-front ioremap; does not free @mapping itself.
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
0145
0146
/*
 * io_mapping_map_wc - return a pointer into the pre-established mapping
 * @mapping:	the io_mapping to map from
 * @offset:	byte offset into the resource
 * @size:	number of bytes the caller intends to access (unused here,
 *		kept for interface parity with the atomic-iomap variant)
 *
 * NOTE(review): unlike the CONFIG_HAVE_ATOMIC_IOMAP variant there is no
 * BUG_ON(offset >= mapping->size) bounds check here — callers are trusted
 * to stay within the resource.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}
0154
/*
 * io_mapping_unmap - release a mapping handed out by io_mapping_map_wc()
 * @vaddr: address returned by io_mapping_map_wc()
 *
 * Intentionally a no-op: the pointer is just an offset into the single
 * up-front ioremap, which stays alive until io_mapping_fini().
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
0159
0160
/*
 * io_mapping_map_atomic_wc - map one page of the resource (atomic)
 * @mapping:	the io_mapping to map from
 * @offset:	byte offset into the resource
 *
 * Disables preemption and page faults to match the atomic-iomap variant's
 * context guarantees, then returns a pointer into the permanent mapping.
 * Must be paired with io_mapping_unmap_atomic().
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
0169
/*
 * io_mapping_unmap_atomic - undo io_mapping_map_atomic_wc()
 * @vaddr: address returned by io_mapping_map_atomic_wc()
 *
 * Reverse order of the map path: drop the (no-op) mapping, then
 * re-enable page faults and preemption.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
0177
/*
 * io_mapping_map_local_wc - map one page of the resource (CPU-local)
 * @mapping:	the io_mapping to map from
 * @offset:	byte offset into the resource
 *
 * No context pinning needed here: the permanent mapping is valid from
 * any context. Pair with io_mapping_unmap_local().
 */
static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
0183
/*
 * io_mapping_unmap_local - undo io_mapping_map_local_wc()
 * @vaddr: address returned by io_mapping_map_local_wc()
 */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}
0188
0189 #endif
0190
0191 static inline struct io_mapping *
0192 io_mapping_create_wc(resource_size_t base,
0193 unsigned long size)
0194 {
0195 struct io_mapping *iomap;
0196
0197 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
0198 if (!iomap)
0199 return NULL;
0200
0201 if (!io_mapping_init_wc(iomap, base, size)) {
0202 kfree(iomap);
0203 return NULL;
0204 }
0205
0206 return iomap;
0207 }
0208
/*
 * io_mapping_free - destroy an io_mapping created by io_mapping_create_wc()
 * @iomap: descriptor returned by io_mapping_create_wc()
 *
 * Tears down the underlying mapping, then frees the descriptor itself.
 */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
0215
0216 #endif
0217
/*
 * Map @size bytes of @iomap (starting at physical page @pfn) into the
 * userspace VMA @vma at @addr. Implemented out of line; returns 0 or a
 * negative errno.
 *
 * NOTE(review): this declaration sits after the header's closing #endif,
 * i.e. outside the include guard. Harmless for a plain prototype (it is
 * idempotent), but worth confirming against upstream intent.
 */
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size);