// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

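/*
 * KSEG1 is the fixed, unmapped, uncached MIPS kernel segment that windows
 * the low 512MB of physical address space.  IS_LOW512() tests whether a
 * physical address fits inside that window; IS_KSEG1() tests whether a
 * virtual address was handed out from it rather than from vmalloc space.
 */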
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

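/*
 * walk_system_ram_range() callback: returns 1 if any page in the range is
 * ordinary RAM, i.e. it has a valid struct page that is not marked
 * reserved and may therefore be handed out by the page allocator.
 */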
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * ioremap_prot     -   map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 * @prot_val:	page protection bits; only the cache attribute (CCA) bits
 *		in _CACHE_MASK are honoured
 *
 * ioremap_prot() gives the caller control over the cache coherency
 * attributes (CCA) of the mapping.
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
		unsigned long prot_val)
{
	unsigned long flags = prot_val & _CACHE_MASK;
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	unsigned long vaddr;
	void __iomem *cpu_addr;

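	/*
	 * Give the platform a chance to provide the mapping itself, e.g.
	 * from a fixed window; plat_ioremap() returns NULL when it does not.
	 */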
	cpu_addr = plat_ioremap(phys_addr, size, flags);
	if (cpu_addr)
		return cpu_addr;

	/* Platforms with extended physical addressing may adjust the address */
	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using
	 * KSEG1, otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the
	 * page allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Reserve a matching virtual address range in the vmalloc area.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

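	/*
	 * Combine the caller's cache attribute with the fixed page-table
	 * bits: global, present, readable and writable.
	 */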
	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			__pgprot(flags))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

/*
 * iounmap - release a mapping obtained from ioremap_prot()
 *
 * The platform hook gets first refusal.  Addresses in KSEG1 come from the
 * fixed uncached window and were never page-table mapped, so only mappings
 * carved out of the vmalloc area need to be torn down with vunmap().
 */
void iounmap(const volatile void __iomem *addr)
{
	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
		vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
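
/*
 * Usage sketch (hypothetical driver code): map one page of device
 * registers uncached, poke a register, then release the mapping again.
 * DEV_PHYS and CTRL_REG are made-up placeholder names.
 *
 *	void __iomem *regs = ioremap_prot(DEV_PHYS, PAGE_SIZE, _CACHE_UNCACHED);
 *
 *	if (regs) {
 *		writel(1, regs + CTRL_REG);
 *		iounmap(regs);
 *	}
 */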