/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>

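/*
 * Added note: ARCv2 cores can reorder memory accesses, so the MMIO
 * accessors below need real read/write barriers; pre-ARCv2 builds
 * compile them out as empty statements.
 */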
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()       rmb()
#define __iowmb()       wmb()
#else
#define __iormb()       do { } while (0)
#define __iowmb()       do { } while (0)
#endif

extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                                  unsigned long flags);
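/*
 * Added note: ARC has no separate I/O port address space; a "port" is
 * just an MMIO address, so mapping is a plain cast and unmapping is a
 * no-op.
 */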
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
    return (void __iomem *)port;
}

static inline void ioport_unmap(void __iomem *addr)
{
}

extern void iounmap(const void __iomem *addr);
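
/*
 * Usage sketch (illustrative, not part of the original header): map a
 * hypothetical device, poke a 32-bit register, unmap. EXAMPLE_PHYS and
 * the 0x10 offset are made-up names.
 *
 *	void __iomem *base = ioremap(EXAMPLE_PHYS, SZ_4K);
 *
 *	if (base) {
 *		u32 v = readl(base + 0x10);
 *		writel(v | 0x1, base + 0x10);
 *		iounmap(base);
 *	}
 */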

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)       ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)       ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)    ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)    ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
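
/*
 * Example (hedged sketch): for a device whose registers are fixed
 * big-endian regardless of CPU endianness, read-modify-write a 32-bit
 * register at a hypothetical offset 0x04:
 *
 *	u32 stat = ioread32be(base + 0x04);
 *
 *	iowrite32be(stat | 0x1, base + 0x04);
 */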

/* Convert a struct page to its physical address */
#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
    u8 b;

    __asm__ __volatile__(
    "   ldb%U1 %0, %1   \n"
    : "=r" (b)
    : "m" (*(volatile u8 __force *)addr)
    : "memory");

    return b;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
    u16 s;

    __asm__ __volatile__(
    "   ldw%U1 %0, %1   \n"
    : "=r" (s)
    : "m" (*(volatile u16 __force *)addr)
    : "memory");

    return s;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
    u32 w;

    __asm__ __volatile__(
    "   ld%U1 %0, %1    \n"
    : "=r" (w)
    : "m" (*(volatile u32 __force *)addr)
    : "memory");

    return w;
}

/*
 * {read,write}s{b,w,l}() repeatedly access the same IO address in
 * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
 * @count times
 */
#define __raw_readsx(t,f)                                               \
static inline void __raw_reads##f(const volatile void __iomem *addr,   \
                  void *ptr, unsigned int count)                        \
{                                                                       \
    bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;            \
    u##t *buf = ptr;                                                    \
                                                                        \
    if (!count)                                                         \
        return;                                                         \
                                                                        \
    /* Some ARC CPUs don't support unaligned accesses */                \
    if (is_aligned) {                                                   \
        do {                                                            \
            u##t x = __raw_read##f(addr);                               \
            *buf++ = x;                                                 \
        } while (--count);                                              \
    } else {                                                            \
        do {                                                            \
            u##t x = __raw_read##f(addr);                               \
            put_unaligned(x, buf++);                                    \
        } while (--count);                                              \
    }                                                                   \
}

#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
    __asm__ __volatile__(
    "   stb%U1 %0, %1   \n"
    :
    : "r" (b), "m" (*(volatile u8 __force *)addr)
    : "memory");
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
    __asm__ __volatile__(
    "   stw%U1 %0, %1   \n"
    :
    : "r" (s), "m" (*(volatile u16 __force *)addr)
    : "memory");
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
    __asm__ __volatile__(
    "   st%U1 %0, %1    \n"
    :
    : "r" (w), "m" (*(volatile u32 __force *)addr)
    : "memory");
}

#define __raw_writesx(t,f)                                              \
static inline void __raw_writes##f(volatile void __iomem *addr,        \
                   const void *ptr, unsigned int count)                 \
{                                                                       \
    bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;            \
    const u##t *buf = ptr;                                              \
                                                                        \
    if (!count)                                                         \
        return;                                                         \
                                                                        \
    /* Some ARC CPUs don't support unaligned accesses */                \
    if (is_aligned) {                                                   \
        do {                                                            \
            __raw_write##f(*buf++, addr);                               \
        } while (--count);                                              \
    } else {                                                            \
        do {                                                            \
            __raw_write##f(get_unaligned(buf++), addr);                 \
        } while (--count);                                              \
    }                                                                   \
}

#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)
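
/*
 * Usage sketch (fifo_reg is a hypothetical FIFO data register): the
 * string variants keep hitting the same MMIO address while walking the
 * memory buffer:
 *
 *	u32 buf[16];
 *
 *	__raw_readsl(fifo_reg, buf, ARRAY_SIZE(buf));
 *	__raw_writesl(fifo_reg, buf, ARRAY_SIZE(buf));
 *
 * Most drivers use the readsl()/writesl() wrappers below, which add
 * the required barriers.
 */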

/*
 * MMIO can also get buffered/optimized in the micro-architecture, so
 * barriers are needed. Based on the ARM model for the typical use case:
 *
 *  <ST [DMA buffer]>
 *  <writel MMIO "go" reg>
 *  or:
 *  <readl MMIO "status" reg>
 *  <LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)        ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)        ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)        ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l)       ({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)       ({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)       ({ __raw_readsl(p,d,l); __iormb(); })

#define writeb(v,c)     ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)     ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)     ({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l)      ({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)      ({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)      ({ __iowmb(); __raw_writesl(p,d,l); })
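
/*
 * The pattern from the comment above, in (hypothetical) driver code:
 *
 *	dma_buf[0] = cmd;                    plain store to DMA buffer
 *	writel(GO, regs + CTRL_REG);         __iowmb() orders ST before MMIO
 *
 *	status = readl(regs + STAT_REG);     __iormb() orders MMIO before LD
 *	data = dma_buf[0];                   now safe to read the buffer
 */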

/*
 * Relaxed API for drivers which can handle barrier ordering themselves
 *
 * These are also defined to perform little-endian accesses. To provide
 * the typical fixed-endian device register semantics, the byte order is
 * swapped on big-endian builds.
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)    __raw_readb(c)
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
                    __raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
                    __raw_readl(c)); __r; })

#define writeb_relaxed(v,c) __raw_writeb(v,c)
#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
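
/*
 * Sketch of a driver handling ordering itself (register names made up):
 * poll with the relaxed accessor, then issue one explicit barrier
 * before touching memory the device wrote:
 *
 *	while (!(readl_relaxed(regs + IRQ_STAT) & DONE))
 *		cpu_relax();
 *	rmb();
 */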

#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */