/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

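/*
 * IS_LOW512 tests whether a physical address falls within the low 512MB
 * that the fixed KSEG1 window can reach; IS_KSEG1 tests whether a virtual
 * address already lies in that unmapped, uncached segment.
 */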
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

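/*
 * walk_system_ram_range() callback: return 1 if any page in the range is
 * valid, non-reserved RAM, i.e. memory the page allocator may hand out
 * and which therefore must not be remapped.
 */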
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                   void *arg)
{
    unsigned long i;

    for (i = 0; i < nr_pages; i++) {
        if (pfn_valid(start_pfn + i) &&
            !PageReserved(pfn_to_page(start_pfn + i)))
            return 1;
    }

    return 0;
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: page protection bits; only the _CACHE_MASK bits, which select
 *            the cache coherency attribute (CCA), are honoured here
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA).
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
        unsigned long prot_val)
{
    unsigned long flags = prot_val & _CACHE_MASK;
    unsigned long offset, pfn, last_pfn;
    struct vm_struct *area;
    phys_addr_t last_addr;
    unsigned long vaddr;
    void __iomem *cpu_addr;

    cpu_addr = plat_ioremap(phys_addr, size, flags);
    if (cpu_addr)
        return cpu_addr;

    phys_addr = fixup_bigphys_addr(phys_addr, size);

    /* Don't allow wraparound or zero size */
    last_addr = phys_addr + size - 1;
    if (!size || last_addr < phys_addr)
        return NULL;

    /*
     * Map uncached objects in the low 512MB of address space using KSEG1,
     * otherwise map using page tables.
     */
    if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
        flags == _CACHE_UNCACHED)
        return (void __iomem *) CKSEG1ADDR(phys_addr);

    /*
     * Don't allow anybody to remap RAM that may be allocated by the page
     * allocator, since that could lead to races & data clobbering.
     */
    pfn = PFN_DOWN(phys_addr);
    last_pfn = PFN_DOWN(last_addr);
    if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                  __ioremap_check_ram) == 1) {
        WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
              &phys_addr, &last_addr);
        return NULL;
    }

    /*
     * Mappings have to be page-aligned
     */
    offset = phys_addr & ~PAGE_MASK;
    phys_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;
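    /*
     * Example (assuming 4KB pages): phys_addr = 0x10000104 and size = 8
     * give offset = 0x104, phys_addr = 0x10000000 and size = 0x1000; the
     * sub-page offset is re-applied to the returned address below.
     */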

    /*
     * Ok, go for it..
     */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    vaddr = (unsigned long)area->addr;

    flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
    if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
            __pgprot(flags))) {
        free_vm_area(area);
        return NULL;
    }

    return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
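
/*
 * A minimal usage sketch (illustrative only; the device address, length
 * and register offset below are hypothetical):
 *
 *     void __iomem *regs = ioremap_prot(0x1f000000, 0x1000,
 *                                       _CACHE_UNCACHED);
 *     if (!regs)
 *         return -ENOMEM;
 *     writel(1, regs + 0x10);
 *     iounmap(regs);
 *
 * With an uncached CCA and a target entirely within the low 512MB, the
 * call returns a fixed KSEG1 address and never touches the page tables.
 */
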
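/*
 * iounmap - tear down a mapping created by ioremap_prot()
 * @addr: CPU-side virtual address returned by ioremap_prot()
 *
 * Platform-owned mappings are handed back via plat_iounmap(), and KSEG1
 * addresses were never entered into the page tables, so only
 * vmalloc-space mappings need a vunmap().
 */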
void iounmap(const volatile void __iomem *addr)
{
    if (!plat_iounmap(addr) && !IS_KSEG1(addr))
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);