/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface.  */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but the macros are still
   expected to be defined.  */
#define __SLOW_DOWN_IO  do { } while (0)
#define SLOW_DOWN_IO    do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR     0xffff800000000000UL
#else
#define IDENT_ADDR     0xfffffc0000000000UL
#endif

/*
 * We try to avoid HAE updates (hence the cache), but when we do need
 * to update the HAE we must do it atomically, so that an interrupt
 * never sees the cached value out of sync with the hardware register.
 */
extern inline void __set_hae(unsigned long new_hae)
{
    unsigned long flags = swpipl(IPL_MAX);

    barrier();

    alpha_mv.hae_cache = new_hae;
    *alpha_mv.hae_register = new_hae;
    mb();
    /* Re-read to make sure it was written.  */
    new_hae = *alpha_mv.hae_register;

    setipl(flags);
    barrier();
}

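/* Callers normally go through set_hae() below, which skips the expensive
   atomic update path whenever the cached value already matches.  */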
extern inline void set_hae(unsigned long new_hae)
{
    if (new_hae != alpha_mv.hae_cache)
        __set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(volatile void *address)
{
    return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
    return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(volatile void *address)
{
    unsigned long phys = (unsigned long)address;

    /* Sign-extend from bit 41.  */
    phys <<= (64 - 41);
    phys = (long)phys >> (64 - 41);

    /* Crop to the physical address width of the processor.  */
    phys &= (1ul << hwrpb->pa_bits) - 1;

    return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
    return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif
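
/* A worked example of the identity mapping, following directly from the
   definitions above: virt_to_phys((void *)(IDENT_ADDR + 0x1000)) yields
   physical address 0x1000, and phys_to_virt(0x1000) maps it back to the
   kernel virtual address IDENT_ADDR + 0x1000.  */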

#define page_to_phys(page)  page_to_pa(page)

/* Maximum PIO space address supported.  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and may well not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
    unsigned long phys = virt_to_phys(address);
    unsigned long bus = phys + __direct_map_base;
    return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus isa_virt_to_bus

static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
    void *virt;

    /* This check is a sanity check but also ensures that bus address 0
       maps to virtual address 0 which is useful to detect null pointers
       (the NCR driver is much simpler if NULL pointers are preserved).  */
    address -= __direct_map_base;
    virt = phys_to_virt(address);
    return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt isa_bus_to_virt
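
/* Illustrative round trip (hypothetical numbers, the real window base is
   chipset-dependent): with __direct_map_base = 0x40000000, a kernel buffer
   whose physical address is 0x2000 has bus address 0x40002000, and
   isa_bus_to_virt(0x40002000) returns the same kernel virtual address.  */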

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)  _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
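
/* IO_CONCAT() pastes the chipset prefix onto an accessor name; the extra
   level of indirection lets the prefix macro expand before pasting.  For
   example, with __IO_PREFIX defined as generic below,
   IO_CONCAT(__IO_PREFIX,readl) expands to generic_readl.  */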

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL)                    \
static inline TYPE generic_##NAME(QUAL void __iomem *addr)      \
{                                   \
    return alpha_mv.mv_##NAME(addr);                \
}

#define REMAP2(TYPE, NAME, QUAL)                    \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)  \
{                                   \
    alpha_mv.mv_##NAME(b, addr);                    \
}

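/* For instance, REMAP1(u8, readb, const volatile) expands to:
 *
 *     static inline u8 generic_readb(const volatile void __iomem *addr)
 *     {
 *         return alpha_mv.mv_readb(addr);
 *     }
 */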
REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
    return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
    return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
    return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
    return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
    return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX     generic
#define generic_trivial_rw_bw   0
#define generic_trivial_rw_lq   0
#define generic_trivial_io_bw   0
#define generic_trivial_io_lq   0
#define generic_trivial_iounmap 0
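
/* The <prefix>_trivial_* flags are tested later in this header to decide
   whether the byte/word (bw) and long/quad (lq) accessors are simple
   enough to expand inline.  A generic machine-vector kernel never is, so
   they are all 0 here; chipset-specific headers may set them to 1.  */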

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8       inb(unsigned long port);
extern u16      inw(unsigned long port);
extern u32      inl(unsigned long port);
extern void     outb(u8 b, unsigned long port);
extern void     outw(u16 b, unsigned long port);
extern void     outl(u32 b, unsigned long port);

extern u8       readb(const volatile void __iomem *addr);
extern u16      readw(const volatile void __iomem *addr);
extern u32      readl(const volatile void __iomem *addr);
extern u64      readq(const volatile void __iomem *addr);
extern void     writeb(u8 b, volatile void __iomem *addr);
extern void     writew(u16 b, volatile void __iomem *addr);
extern void     writel(u32 b, volatile void __iomem *addr);
extern void     writeq(u64 b, volatile void __iomem *addr);

extern u8       __raw_readb(const volatile void __iomem *addr);
extern u16      __raw_readw(const volatile void __iomem *addr);
extern u32      __raw_readl(const volatile void __iomem *addr);
extern u64      __raw_readq(const volatile void __iomem *addr);
extern void     __raw_writeb(u8 b, volatile void __iomem *addr);
extern void     __raw_writew(u16 b, volatile void __iomem *addr);
extern void     __raw_writel(u32 b, volatile void __iomem *addr);
extern void     __raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
    return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
    return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

#define ioremap_wc ioremap
#define ioremap_uc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
    IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}
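
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): map a bus window, access it through the accessors declared
 * above, then tear the mapping down again:
 *
 *     void __iomem *regs = ioremap(mem_start, mem_len);
 *     if (regs) {
 *         u32 id = readl(regs);
 *         writel(0x1, regs + 0x04);
 *         iounmap(regs);
 *     }
 */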

static inline int __is_ioaddr(unsigned long addr)
{
    return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)      __is_ioaddr((unsigned long)(a))
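/* The macro wrapper lets __is_ioaddr() accept pointers as well as plain
   integer addresses by casting its argument first.  */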

static inline int __is_mmio(const volatile void __iomem *addr)
{
    return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(const void __iomem *addr)
{
    unsigned int ret;
    mb();
    ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
    mb();
    return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
    unsigned int ret;
    mb();
    ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
    mb();
    return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
    mb();
    IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
    mb();
    IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
    return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
    return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
    iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
    iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(const void __iomem *addr)
{
    unsigned int ret;
    mb();
    ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
    mb();
    return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
    mb();
    IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
    return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
    iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
    return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
    return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
    IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
    IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
    u8 ret;
    mb();
    ret = __raw_readb(addr);
    mb();
    return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
    u16 ret;
    mb();
    ret = __raw_readw(addr);
    mb();
    return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
    mb();
    __raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
    mb();
    __raw_writew(b, addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
    return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
    return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
    IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
    IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
    u32 ret;
    mb();
    ret = __raw_readl(addr);
    mb();
    return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
    u64 ret;
    mb();
    ret = __raw_readq(addr);
    mb();
    return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
    mb();
    __raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
    mb();
    __raw_writeq(b, addr);
}
#endif

#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))
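
/* The *be variants above byte-swap around the native little-endian
   accessors, for devices whose registers are big-endian.  */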

#define inb_p       inb
#define inw_p       inw
#define inl_p       inl
#define outb_p      outb
#define outw_p      outw
#define outl_p      outl

extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
    mb();
    return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
    mb();
    return __raw_readw(addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
    mb();
    return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
    mb();
    return __raw_readq(addr);
}
#endif

#define writeb_relaxed  writeb
#define writew_relaxed  writew
#define writel_relaxed  writel
#define writeq_relaxed  writeq
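
/* Note that the inline "relaxed" readX variants above keep the leading
   mb() and only drop the trailing barrier of readX(), while the
   writeX_relaxed names are plain aliases for writeX().  */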

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

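/* _memset_c_io() fills with a full 64-bit pattern, so the wrappers below
   replicate the byte (or 16-bit word) across all eight bytes by
   multiplying with 0x0101010101010101 (or 0x0001000100010001).  */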
static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
    _memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
    _memset_c_io(addr, 0x0001000100010001UL * c, len);
}

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

/*
 * The Alpha Jensen hardware, for some rather strange reason, puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines override the defaults when doing RTC queries.
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)    ((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)   (0x170 + (x))
# else
#  define RTC_PORT(x)   (0x70 + (x))
# endif
#endif
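/* For example, RTC_PORT(1) is port 0x71 on most platforms, 0x171 on
   Jensen, and whatever the machine vector reports on a generic kernel.  */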
#define RTC_ALWAYS_BCD  0

/*
 * Some drivers use #if[n]def writeq to check whether the platform
 * provides it.  That is a bad idea, and something like ARCH_HAS_WRITEQ
 * would be preferable; for now, rely on cpp's anti-recursion rules and
 * make sure the macros are defined and expand to themselves.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */