/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005  MIPS Technologies, Inc.  All rights reserved.
 *  Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)    (x)
# define __raw_ioswabw(a, x)    (x)
# define __raw_ioswabl(a, x)    (x)
# define __raw_ioswabq(a, x)    (x)
# define ____raw_ioswabq(a, x)  (x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  This should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
    mips_io_port_base = base;
}
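
/*
 * Illustrative sketch (not from the original source; the window address and
 * port names below are hypothetical): platform setup code establishes the
 * port base once, after which drivers can use the usual port accessors
 * defined further down in this file.
 *
 *	set_io_port_base(CKSEG1ADDR(0x1fd00000));	// hypothetical ISA I/O window
 *	outb(0x42, DEV_CMD_PORT);			// DEV_CMD_PORT is made up
 *	status = inb(DEV_STATUS_PORT);
 */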

/*
 * Provide the necessary definitions for generic iomap. We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses for
 * use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET  mips_io_port_base
#define PIO_MASK    IO_SPACE_LIMIT
#define PIO_RESERVED    0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/*
 *     virt_to_phys    -       map virtual addresses to physical
 *     @address: address to remap
 *
 *     The returned physical address is the physical (CPU) mapping for
 *     the memory address given. It is only valid to use this function on
 *     addresses directly mapped or allocated via kmalloc.
 *
 *     This function does not give bus mappings for DMA transfers. In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
{
    return __pa(address);
}

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(volatile const void *x);
#else
#define __virt_to_phys(x)   __virt_to_phys_nodebug(x)
#endif

#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
    return __virt_to_phys(x);
}
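
/*
 * Illustrative sketch (not from the original source): virt_to_phys() is only
 * meaningful for directly-mapped kernel memory, e.g. a kmalloc() buffer.
 * For device DMA the dma_map_*() API must be used instead.
 *
 *	void *buf = kmalloc(256, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);	// CPU physical address, not a bus/DMA address
 */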

/*
 *     phys_to_virt    -       map physical address to virtual
 *     @address: address to remap
 *
 *     The returned virtual address is a current CPU mapping for
 *     the memory address given. It is only valid to use this function on
 *     addresses that have a kernel mapping.
 *
 *     This function does not handle bus mappings for DMA transfers. In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
    return __va(address);
}
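
/*
 * Illustrative sketch (not from the original source): for directly-mapped
 * memory the two helpers are inverses of each other.
 *
 *	phys_addr_t pa = virt_to_phys(buf);
 *	void *same = phys_to_virt(pa);		// same == buf for directly-mapped memory
 */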

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
    return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
    return phys_to_virt(address);
}

/*
 * Convert a "struct page" to a physical address.
 */
#define page_to_phys(page)  ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
        unsigned long prot_val);
void iounmap(const volatile void __iomem *addr);

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)                       \
    ioremap_prot((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc      ioremap
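
/*
 * Illustrative sketch (not from the original source; the register offsets
 * are hypothetical): a driver maps its MMIO resource and then uses the mmio
 * accessors defined further down in this file.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + REG_CTRL);		// hypothetical control register
 *	u32 val = readl(regs + REG_STATUS);	// hypothetical status register
 *	iounmap(regs);
 */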

/*
 * ioremap_cache -  map bus memory into CPU space
 * @offset:     bus address of the memory
 * @size:       size of the resource to map
 *
 * ioremap_cache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cacheable by
 * the CPU.  It also enables full write-combining, which is useful for
 * some memory-like regions on I/O busses.
 */
#define ioremap_cache(offset, size)                 \
    ioremap_prot((offset), (size), _page_cachable_default)

/*
 * ioremap_wc     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communication performance.  If it is determined at boot time
 * that the CPU's CCA doesn't support UCA, this method falls back to the
 * _CACHE_UNCACHED option (see cpu_probe()).
 */
#define ioremap_wc(offset, size)                    \
    ioremap_prot((offset), (size), boot_cpu_data.writecombine)

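/*
 * Illustrative sketch (not from the original source; the resource index is
 * hypothetical): write-combining is typically used for large, memory-like
 * BARs such as framebuffers, where streaming writes dominate.
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 0),
 *				      pci_resource_len(pdev, 0));
 */
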
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
#define war_io_reorder_wmb()        wmb()
#else
#define war_io_reorder_wmb()        barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
                                    \
static inline void pfx##write##bwlq(type val,               \
                    volatile void __iomem *mem)     \
{                                   \
    volatile type *__mem;                       \
    type __val;                         \
                                    \
    if (barrier)                            \
        iobarrier_rw();                     \
    else                                \
        war_io_reorder_wmb();                   \
                                    \
    __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));    \
                                    \
    __val = pfx##ioswab##bwlq(__mem, val);              \
                                    \
    if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
        *__mem = __val;                     \
    else if (cpu_has_64bits) {                  \
        unsigned long __flags;                  \
        type __tmp;                     \
                                    \
        if (irq)                        \
            local_irq_save(__flags);            \
        __asm__ __volatile__(                   \
            ".set   push"       "\t\t# __writeq""\n\t"  \
            ".set   arch=r4000"         "\n\t"  \
            "dsll32 %L0, %L0, 0"            "\n\t"  \
            "dsrl32 %L0, %L0, 0"            "\n\t"  \
            "dsll32 %M0, %M0, 0"            "\n\t"  \
            "or %L0, %L0, %M0"          "\n\t"  \
            "sd %L0, %2"            "\n\t"  \
            ".set   pop"                "\n"    \
            : "=r" (__tmp)                  \
            : "0" (__val), "m" (*__mem));           \
        if (irq)                        \
            local_irq_restore(__flags);         \
    } else                              \
        BUG();                          \
}                                   \
                                    \
static inline type pfx##read##bwlq(const volatile void __iomem *mem)    \
{                                   \
    volatile type *__mem;                       \
    type __val;                         \
                                    \
    __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));    \
                                    \
    if (barrier)                            \
        iobarrier_rw();                     \
                                    \
    if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
        __val = *__mem;                     \
    else if (cpu_has_64bits) {                  \
        unsigned long __flags;                  \
                                    \
        if (irq)                        \
            local_irq_save(__flags);            \
        __asm__ __volatile__(                   \
            ".set   push"       "\t\t# __readq" "\n\t"  \
            ".set   arch=r4000"         "\n\t"  \
            "ld %L0, %1"            "\n\t"  \
            "dsra32 %M0, %L0, 0"            "\n\t"  \
            "sll    %L0, %L0, 0"            "\n\t"  \
            ".set   pop"                "\n"    \
            : "=r" (__val)                  \
            : "m" (*__mem));                \
        if (irq)                        \
            local_irq_restore(__flags);         \
    } else {                            \
        __val = 0;                      \
        BUG();                          \
    }                               \
                                    \
    /* prevent prefetching of coherent DMA data prematurely */  \
    if (!relax)                         \
        rmb();                          \
    return pfx##ioswab##bwlq(__mem, __val);             \
}

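/*
 * Illustrative, simplified expansion (not from the original source): with
 * pfx="", bwlq="b", type=u8, barrier=1, relax=0, irq=1 the macro above
 * generates roughly the following accessor (the 64-bit branch is compiled
 * out for sub-u64 types):
 *
 *	static inline void writeb(u8 val, volatile void __iomem *mem)
 *	{
 *		volatile u8 *__mem;
 *		u8 __val;
 *
 *		iobarrier_rw();
 *		__mem = (void *)__swizzle_addr_b((unsigned long)(mem));
 *		__val = ioswabb(__mem, val);
 *		*__mem = __val;
 *	}
 *
 * readb() is built the same way, with an rmb() after the load so that
 * coherent DMA data is not prefetched before the device signals completion.
 */
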
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)   \
                                    \
static inline void pfx##out##bwlq##p(type val, unsigned long port)  \
{                                   \
    volatile type *__addr;                      \
    type __val;                         \
                                    \
    if (barrier)                            \
        iobarrier_rw();                     \
    else                                \
        war_io_reorder_wmb();                   \
                                    \
    __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
                                    \
    __val = pfx##ioswab##bwlq(__addr, val);             \
                                    \
    /* Really, we want this to be atomic */             \
    BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));     \
                                    \
    *__addr = __val;                        \
}                                   \
                                    \
static inline type pfx##in##bwlq##p(unsigned long port)         \
{                                   \
    volatile type *__addr;                      \
    type __val;                         \
                                    \
    __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
                                    \
    BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));     \
                                    \
    if (barrier)                            \
        iobarrier_rw();                     \
                                    \
    __val = *__addr;                        \
                                    \
    /* prevent prefetching of coherent DMA data prematurely */  \
    if (!relax)                         \
        rmb();                          \
    return pfx##ioswab##bwlq(__addr, __val);            \
}

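/*
 * Illustrative, simplified expansion (not from the original source): with an
 * empty prefix, bwlq="b" and no "_p" suffix, the macro above generates
 * roughly:
 *
 *	static inline void outb(u8 val, unsigned long port)
 *	{
 *		volatile u8 *__addr;
 *
 *		iobarrier_rw();
 *		__addr = (void *)__swizzle_addr_b(mips_io_port_base + port);
 *		*__addr = ioswabb(__addr, val);
 *	}
 *
 * inb() is built the same way, with an rmb() after the load so coherent DMA
 * data is not read speculatively too early.
 */
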
#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)          \
                                    \
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)                     \
                                    \
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)               \
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)               \
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)               \
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif

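/*
 * The invocations above generate the standard mmio accessor families:
 * readb/readw/readl and writeb/writew/writel (plus readq/writeq on 64-bit
 * kernels), the __raw_ variants (no byte-order mangling) and the _relaxed
 * variants (which skip the trailing rmb()).  Illustrative usage, with a
 * hypothetical register offset:
 *
 *	u32 v = readl(regs + REG_IRQSTAT);		// ordered, with platform byte-lane mangling
 *	u32 r = __raw_readl(regs + REG_IRQSTAT);	// raw, native bus byte order
 */
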
#define __BUILD_IOPORT_PFX(bus, bwlq, type)             \
    __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)           \
    __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)                  \
    __BUILD_IOPORT_PFX(, bwlq, type)                \
    __BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

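/*
 * The invocations above generate inb/inw/inl and outb/outw/outl, together
 * with the historical "_p" variants (identical to the plain ones here) and
 * the q forms on 64-bit kernels.  Illustrative usage, with hypothetical port
 * numbers:
 *
 *	outw(0x1234, DEV_CMD_PORT);	// DEV_CMD_PORT is made up
 *	u16 v = inw(DEV_DATA_PORT);
 */
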
#define __BUILDIO(bwlq, type)                       \
                                    \
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)

#define readb_relaxed           __relaxed_readb
#define readw_relaxed           __relaxed_readw
#define readl_relaxed           __relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed           __relaxed_readq
#endif

#define writeb_relaxed          __relaxed_writeb
#define writew_relaxed          __relaxed_writew
#define writel_relaxed          __relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed          __relaxed_writeq
#endif

#define readb_be(addr)                          \
    __raw_readb((__force unsigned *)(addr))
#define readw_be(addr)                          \
    be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)                          \
    be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)                          \
    be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)                        \
    __raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)                        \
    __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)                        \
    __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)                        \
    __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

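/*
 * Illustrative sketch (not from the original source; the offsets are
 * hypothetical): the _be accessors are for registers defined as big-endian
 * regardless of CPU endianness, so the value is byteswapped on little-endian
 * kernels and passed through on big-endian ones.
 *
 *	u32 id = readl_be(regs + REG_CHIP_ID);
 *	writel_be(0x1, regs + REG_SOFT_RESET);
 */
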
/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq               readq
#define writeq              writeq
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)               \
                                    \
static inline void writes##bwlq(volatile void __iomem *mem,     \
                const void *addr, unsigned int count)   \
{                                   \
    const volatile type *__addr = addr;             \
                                    \
    while (count--) {                       \
        __mem_write##bwlq(*__addr, mem);            \
        __addr++;                       \
    }                               \
}                                   \
                                    \
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,  \
                   unsigned int count)          \
{                                   \
    volatile type *__addr = addr;                   \
                                    \
    while (count--) {                       \
        *__addr = __mem_read##bwlq(mem);            \
        __addr++;                       \
    }                               \
}

#define __BUILD_IOPORT_STRING(bwlq, type)               \
                                    \
static inline void outs##bwlq(unsigned long port, const void *addr, \
                  unsigned int count)           \
{                                   \
    const volatile type *__addr = addr;             \
                                    \
    while (count--) {                       \
        __mem_out##bwlq(*__addr, port);             \
        __addr++;                       \
    }                               \
}                                   \
                                    \
static inline void ins##bwlq(unsigned long port, void *addr,        \
                 unsigned int count)            \
{                                   \
    volatile type *__addr = addr;                   \
                                    \
    while (count--) {                       \
        *__addr = __mem_in##bwlq(port);             \
        __addr++;                       \
    }                               \
}

#define BUILDSTRING(bwlq, type)                     \
                                    \
__BUILD_MEMORY_STRING(bwlq, type)                   \
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif

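/*
 * Illustrative sketch (not from the original source; the names are
 * hypothetical): the string accessors repeatedly hit the *same* register or
 * port while advancing through a normal memory buffer, which is the usual
 * pattern for draining or filling a device FIFO.
 *
 *	readsl(regs + REG_RX_FIFO, buf, words);		// buf is in ordinary memory
 *	outsb(DEV_TX_PORT, tx_buf, len);
 */
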
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
    memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
    memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
    memcpy((void __force *) dst, src, count);
}

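/*
 * Illustrative sketch (not from the original source; offset and length are
 * hypothetical): unlike the string accessors above, these helpers walk
 * through a *range* of device memory, e.g. to snapshot a shared buffer
 * exposed through a BAR.
 *
 *	memcpy_fromio(shadow, regs + SRAM_OFFSET, SRAM_SIZE);
 *	memset_io(regs + SRAM_OFFSET, 0, SRAM_SIZE);
 */
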
/*
 * The caches on some architectures aren't dma-coherent, so the kernel has
 * to handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    This operation is necessary before DMA transfers from memory to a
 *    device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)    _dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)        _dma_cache_wback(start, size)
#define dma_cache_inv(start, size)      _dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
    do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
    do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)   \
    do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

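/*
 * Illustrative sketch (arch-internal use only, not from the original source;
 * buffer and length are hypothetical): on a non-coherent platform, arch DMA
 * code writes dirty lines back before a device reads a buffer, and
 * invalidates before the CPU reads data the device has written.
 *
 *	dma_cache_wback((unsigned long)buf, len);	// before DMA from memory to the device
 *	// ... start DMA, wait for completion ...
 *	dma_cache_inv((unsigned long)buf, len);		// before the CPU reads DMA'd data
 */
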
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)    (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

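/*
 * Worked example (not from the original source; base and offset are
 * hypothetical): on a big-endian kernel the low 32 bits of an 8-byte-aligned
 * 64-bit CSR sit at byte offset 4, so csr_in32(a) reads
 * *(volatile u32 *)(a + 4); on little-endian it reads from offset 0.
 *
 *	u32 v = csr_in32(csr_base + 0x10);
 *	csr_out32(v | 0x1, csr_base + 0x10);
 */
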
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)    __va(p)

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */