/*
 *  arch/arm/include/asm/io.h
 */
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#ifdef __KERNEL__

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * Atomic MMIO-wide IO modify
 */
extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
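
/*
 * Usage sketch (not from this file): the assumed semantics are a locked
 * read-modify-write of a 32-bit MMIO register, replacing the bits
 * selected by @mask with the corresponding bits of @set.  The register
 * and bit names below are hypothetical:
 *
 *	// Set the hypothetical HYP_CTRL_ENABLE bit without racing other users.
 *	atomic_io_modify(base + HYP_CTRL_REG, HYP_CTRL_ENABLE, HYP_CTRL_ENABLE);
 *
 *	// Clear the same bit: keep the mask, pass zero as the new value.
 *	atomic_io_modify(base + HYP_CTRL_REG, HYP_CTRL_ENABLE, 0);
 */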
0040
0041
0042
0043
0044
0045 void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
0046 void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
0047 void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);
0048
0049 void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
0050 void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
0051 void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
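
/*
 * Usage sketch (assumed driver code, not part of this header): the
 * __raw_reads{b,w,l}() and __raw_writes{b,w,l}() helpers transfer a
 * buffer to or from a single MMIO location, the usual pattern for a
 * data FIFO register.  HYP_FIFO_REG and the buffer are hypothetical:
 *
 *	u16 buf[64];
 *	__raw_readsw(base + HYP_FIFO_REG, buf, ARRAY_SIZE(buf));
 *
 * Drivers normally reach these through the readsw()/writesw() or
 * insw()/outsw() wrappers defined later in this file.
 */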

#if __LINUX_ARM_ARCH__ < 6
/*
 * Half-word accesses are problematic with RiscPC due to limitations of
 * the bus.  Rather than special-case the machine, just let the compiler
 * generate the access for CPUs prior to ARMv6.
 */
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
 * When running under a hypervisor, we want to avoid I/O accesses with
 * writeback addressing modes as these incur a significant performance
 * overhead (the address generation must be emulated in software).
 */
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("strh %1, %0"
		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;
	asm volatile("ldrh %0, %1"
		     : "=r" (val)
		     : "Q" (*(volatile u16 __force *)addr));
	return val;
}
#endif

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("strb %1, %0"
		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("str %1, %0"
		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;
	asm volatile("ldrb %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u8 __force *)addr));
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;
	asm volatile("ldr %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u32 __force *)addr));
	return val;
}
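
/*
 * The __raw_*() accessors above perform single, native-endian accesses
 * with no memory barriers and no byte swapping.  A sketch of direct use
 * (the register offset is hypothetical; most drivers should prefer the
 * readl()/writel() family defined below):
 *
 *	__raw_writel(0xdeadbeef, base + HYP_SCRATCH_REG);
 *	if (__raw_readl(base + HYP_SCRATCH_REG) != 0xdeadbeef)
 *		return -ENODEV;
 */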

/*
 * Memory types usable for the MMIO mappings below; further types are
 * defined in asm/mach/map.h and are not valid for ioremap.
 */
#define MT_DEVICE		0
#define MT_DEVICE_NONSHARED	1
#define MT_DEVICE_CACHED	2
#define MT_DEVICE_WC		3

/*
 * __arm_ioremap_caller() takes a CPU physical address, while
 * __arm_ioremap_pfn() takes a page frame number and an offset into that
 * page.  The _caller variant records a __builtin_return_address(0)
 * value for vmalloc accounting and should only be used from non-inline
 * functions.
 */
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
					  void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
void __arm_iomem_set_ro(void __iomem *ptr, size_t size);

extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
					     unsigned int, void *);

/*
 * Bad read/write accesses...
 */
extern void __readwrite_bug(const char *fn);

/*
 * A typesafe __io() helper
 */
static inline void __iomem *__typesafe_io(unsigned long addr)
{
	return (void __iomem *)addr;
}

#define IOMEM(x) ((void __force __iomem *)(x))
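
/*
 * IOMEM() turns a plain (usually fixed virtual) address into an __iomem
 * cookie without triggering sparse address-space warnings.  A sketch
 * with a made-up static mapping address:
 *
 *	#define HYP_DEBUG_UART_BASE	IOMEM(0xfe001000)
 *	...
 *	__raw_writeb(c, HYP_DEBUG_UART_BASE);
 */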

/* IO barriers */
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#include <asm/barrier.h>
#define __iormb() rmb()
#define __iowmb() wmb()
#else
#define __iormb() do { } while (0)
#define __iowmb() do { } while (0)
#endif

/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE 0xfee00000
#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)

#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif

struct resource;

#define pci_remap_iospace pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);

/*
 * PCI configuration space mapping function.
 *
 * The PCI specification does not allow configuration write transactions
 * to be posted, so pci_remap_cfgspace() is implemented through a
 * strongly ordered memory mapping.
 */
#define pci_remap_cfgspace pci_remap_cfgspace
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);

/*
 * Now, pick up the machine-defined IO definitions.
 */
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
#else
#if IS_ENABLED(CONFIG_PCMCIA) || defined(CONFIG_PCI)
#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#endif

/*
 * IO port access primitives
 * -------------------------
 *
 * The ARM doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these accessors are defined to perform little
 * endian accesses only.
 */
#ifdef __io
#define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); })
#define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \
					cpu_to_le16(v),__io(p)); })
#define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \
					cpu_to_le32(v),__io(p)); })

#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \
					__raw_readw(__io(p))); __iormb(); __v; })
#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \
					__raw_readl(__io(p))); __iormb(); __v; })

#define outsb(p,d,l) __raw_writesb(__io(p),d,l)
#define outsw(p,d,l) __raw_writesw(__io(p),d,l)
#define outsl(p,d,l) __raw_writesl(__io(p),d,l)

#define insb(p,d,l) __raw_readsb(__io(p),d,l)
#define insw(p,d,l) __raw_readsw(__io(p),d,l)
#define insl(p,d,l) __raw_readsl(__io(p),d,l)
#endif
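
/*
 * Usage sketch for the port accessors above (assumed legacy device, not
 * part of this header).  The 16550 scratch register at the conventional
 * COM1 base is used purely for illustration:
 *
 *	#define HYP_COM1_BASE	0x3f8
 *
 *	outb(0xa5, HYP_COM1_BASE + 7);		// scratch register
 *	if (inb(HYP_COM1_BASE + 7) != 0xa5)
 *		return -ENODEV;
 *
 * Port numbers are translated through __io(), i.e. into the fixed
 * PCI_IO_VIRT_BASE window, before being accessed as MMIO.
 */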

/*
 * String version of IO memory access operations.
 */
extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);

/*
 * Memory access primitives
 * ------------------------
 *
 * These perform PCI memory accesses via an ioremap region.  They don't
 * take an address as such, but a cookie.
 *
 * Again, these are defined to perform little endian accesses.  See the
 * IO port primitives for more information.
 */
#ifndef readl
#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c) __raw_writeb(v,c)
#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)

#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })

#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
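
/*
 * Ordering sketch (assumed driver code with hypothetical register
 * names): the _relaxed accessors skip __iormb()/__iowmb() and are
 * sufficient for a pure register poll, while the ordered forms should
 * be used when MMIO must be ordered against normal memory, e.g. a DMA
 * handoff:
 *
 *	while (!(readl_relaxed(base + HYP_STATUS) & HYP_STATUS_READY))
 *		cpu_relax();
 *
 *	// Descriptor writes in normal memory must be visible to the
 *	// device before the doorbell write, hence writel(), not
 *	// writel_relaxed().
 *	writel(HYP_DOORBELL_KICK, base + HYP_DOORBELL);
 */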

#define readsb(p,d,l) __raw_readsb(p,d,l)
#define readsw(p,d,l) __raw_readsw(p,d,l)
#define readsl(p,d,l) __raw_readsl(p,d,l)

#define writesb(p,d,l) __raw_writesb(p,d,l)
#define writesw(p,d,l) __raw_writesw(p,d,l)
#define writesl(p,d,l) __raw_writesl(p,d,l)

#ifndef __ARMBE__
static inline void memset_io(volatile void __iomem *dst, unsigned c,
			     size_t count)
{
	extern void mmioset(void *, unsigned int, size_t);
	mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)

static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
				 size_t count)
{
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)

static inline void memcpy_toio(volatile void __iomem *to, const void *from,
			       size_t count)
{
	extern void mmiocpy(void *, const void *, size_t);
	mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)

#else
#define memset_io(c,v,l) _memset_io(c,(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
#endif
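
/*
 * Usage sketch (hypothetical device window): memcpy_fromio() and
 * memcpy_toio() copy between normal memory and an ioremap'd region
 * using MMIO-safe accesses:
 *
 *	u8 shadow[256];
 *
 *	memcpy_fromio(shadow, regs + HYP_MBOX_OFFSET, sizeof(shadow));
 *	memcpy_toio(regs + HYP_MBOX_OFFSET, shadow, sizeof(shadow));
 */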

#endif

/*
 * ioremap() and friends.
 *
 * ioremap() takes a resource address and size.  Due to the ARM memory
 * types, it is important to use the correct ioremap() variant, as each
 * mapping has specific properties:
 *
 *   ioremap()		Device memory: no access speculation, accesses
 *			are not repeated, and their number, order and
 *			size are maintained
 *   ioremap_cache()	Normal memory, writeback cacheable
 *   ioremap_wc()	Normal memory, non-cacheable (write combining);
 *			ioremap_wt() is an alias for it on ARM
 */
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap

/*
 * Do not use ioremap_cache for mapping memory.  Use memremap instead.
 */
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc

void iounmap(volatile void __iomem *io_addr);
#define iounmap iounmap

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
#define arch_memremap_wb arch_memremap_wb
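
/*
 * Probe-time sketch (hypothetical driver, error handling trimmed):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	id = readl(regs + HYP_ID_REG);
 *	...
 *	iounmap(regs);
 *
 * In real drivers the managed devm_ioremap_resource() helper is usually
 * preferred over the bare ioremap()/iounmap() pair shown here.
 */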

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
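
/*
 * Sketch for a device whose registers are big-endian regardless of CPU
 * endianness (register names are hypothetical):
 *
 *	u32 ver = ioread32be(regs + HYP_VERSION_REG);
 *	iowrite32be(HYP_RESET_MAGIC, regs + HYP_RESET_REG);
 */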

#ifndef ioport_map
#define ioport_map ioport_map
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
#endif
#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif

struct pci_dev;

#define pci_iounmap pci_iounmap
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p) __va(p)

#include <asm-generic/io.h>

#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
#endif

/*
 * Register ISA memory and port locations for glibc iopl/inb/outb
 * emulation.
 */
extern void register_isa_ports(unsigned int mmio, unsigned int io,
			       unsigned int io_shift);

#endif	/* __KERNEL__ */
#endif	/* __ASM_ARM_IO_H */