0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef _ASM_IO_H
0013 #define _ASM_IO_H
0014
0015 #define ARCH_HAS_IOREMAP_WC
0016
0017 #include <linux/compiler.h>
0018 #include <linux/kernel.h>
0019 #include <linux/types.h>
0020 #include <linux/irqflags.h>
0021
0022 #include <asm/addrspace.h>
0023 #include <asm/barrier.h>
0024 #include <asm/bug.h>
0025 #include <asm/byteorder.h>
0026 #include <asm/cpu.h>
0027 #include <asm/cpu-features.h>
0028 #include <asm-generic/iomap.h>
0029 #include <asm/page.h>
0030 #include <asm/pgtable-bits.h>
0031 #include <asm/processor.h>
0032 #include <asm/string.h>
0033 #include <mangle-port.h>
0034
0035
0036
0037
0038
0039
0040
0041 # define __raw_ioswabb(a, x) (x)
0042 # define __raw_ioswabw(a, x) (x)
0043 # define __raw_ioswabl(a, x) (x)
0044 # define __raw_ioswabq(a, x) (x)
0045 # define ____raw_ioswabq(a, x) (x)
0046
0047 # define __relaxed_ioswabb ioswabb
0048 # define __relaxed_ioswabw ioswabw
0049 # define __relaxed_ioswabl ioswabl
0050 # define __relaxed_ioswabq ioswabq
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062 extern unsigned long mips_io_port_base;
0063
0064 static inline void set_io_port_base(unsigned long base)
0065 {
0066 mips_io_port_base = base;
0067 }
0068
0069
0070
0071
0072
0073
0074
0075 #define HAVE_ARCH_PIO_SIZE
0076 #define PIO_OFFSET mips_io_port_base
0077 #define PIO_MASK IO_SPACE_LIMIT
0078 #define PIO_RESERVED 0x0UL
0079
0080
0081
0082
0083
0084
0085
0086 #define iobarrier_rw() mb()
0087 #define iobarrier_r() rmb()
0088 #define iobarrier_w() wmb()
0089 #define iobarrier_sync() iob()
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103 static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
0104 {
0105 return __pa(address);
0106 }
0107
0108 #ifdef CONFIG_DEBUG_VIRTUAL
0109 extern phys_addr_t __virt_to_phys(volatile const void *x);
0110 #else
0111 #define __virt_to_phys(x) __virt_to_phys_nodebug(x)
0112 #endif
0113
0114 #define virt_to_phys virt_to_phys
0115 static inline phys_addr_t virt_to_phys(const volatile void *x)
0116 {
0117 return __virt_to_phys(x);
0118 }
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132 static inline void * phys_to_virt(unsigned long address)
0133 {
0134 return __va(address);
0135 }
0136
0137
0138
0139
0140 static inline unsigned long isa_virt_to_bus(volatile void *address)
0141 {
0142 return virt_to_phys(address);
0143 }
0144
0145 static inline void *isa_bus_to_virt(unsigned long address)
0146 {
0147 return phys_to_virt(address);
0148 }
0149
0150
0151
0152
0153 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
0154
0155 void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
0156 unsigned long prot_val);
0157 void iounmap(const volatile void __iomem *addr);
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170 #define ioremap(offset, size) \
0171 ioremap_prot((offset), (size), _CACHE_UNCACHED)
0172 #define ioremap_uc ioremap
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189 #define ioremap_cache(offset, size) \
0190 ioremap_prot((offset), (size), _page_cachable_default)
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210 #define ioremap_wc(offset, size) \
0211 ioremap_prot((offset), (size), boot_cpu_data.writecombine)
0212
0213 #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
0214 #define war_io_reorder_wmb() wmb()
0215 #else
0216 #define war_io_reorder_wmb() barrier()
0217 #endif
0218
0219 #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
0220 \
0221 static inline void pfx##write##bwlq(type val, \
0222 volatile void __iomem *mem) \
0223 { \
0224 volatile type *__mem; \
0225 type __val; \
0226 \
0227 if (barrier) \
0228 iobarrier_rw(); \
0229 else \
0230 war_io_reorder_wmb(); \
0231 \
0232 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
0233 \
0234 __val = pfx##ioswab##bwlq(__mem, val); \
0235 \
0236 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
0237 *__mem = __val; \
0238 else if (cpu_has_64bits) { \
0239 unsigned long __flags; \
0240 type __tmp; \
0241 \
0242 if (irq) \
0243 local_irq_save(__flags); \
0244 __asm__ __volatile__( \
0245 ".set push" "\t\t# __writeq""\n\t" \
0246 ".set arch=r4000" "\n\t" \
0247 "dsll32 %L0, %L0, 0" "\n\t" \
0248 "dsrl32 %L0, %L0, 0" "\n\t" \
0249 "dsll32 %M0, %M0, 0" "\n\t" \
0250 "or %L0, %L0, %M0" "\n\t" \
0251 "sd %L0, %2" "\n\t" \
0252 ".set pop" "\n" \
0253 : "=r" (__tmp) \
0254 : "0" (__val), "m" (*__mem)); \
0255 if (irq) \
0256 local_irq_restore(__flags); \
0257 } else \
0258 BUG(); \
0259 } \
0260 \
0261 static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
0262 { \
0263 volatile type *__mem; \
0264 type __val; \
0265 \
0266 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
0267 \
0268 if (barrier) \
0269 iobarrier_rw(); \
0270 \
0271 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
0272 __val = *__mem; \
0273 else if (cpu_has_64bits) { \
0274 unsigned long __flags; \
0275 \
0276 if (irq) \
0277 local_irq_save(__flags); \
0278 __asm__ __volatile__( \
0279 ".set push" "\t\t# __readq" "\n\t" \
0280 ".set arch=r4000" "\n\t" \
0281 "ld %L0, %1" "\n\t" \
0282 "dsra32 %M0, %L0, 0" "\n\t" \
0283 "sll %L0, %L0, 0" "\n\t" \
0284 ".set pop" "\n" \
0285 : "=r" (__val) \
0286 : "m" (*__mem)); \
0287 if (irq) \
0288 local_irq_restore(__flags); \
0289 } else { \
0290 __val = 0; \
0291 BUG(); \
0292 } \
0293 \
0294 \
0295 if (!relax) \
0296 rmb(); \
0297 return pfx##ioswab##bwlq(__mem, __val); \
0298 }
0299
0300 #define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \
0301 \
0302 static inline void pfx##out##bwlq##p(type val, unsigned long port) \
0303 { \
0304 volatile type *__addr; \
0305 type __val; \
0306 \
0307 if (barrier) \
0308 iobarrier_rw(); \
0309 else \
0310 war_io_reorder_wmb(); \
0311 \
0312 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
0313 \
0314 __val = pfx##ioswab##bwlq(__addr, val); \
0315 \
0316 \
0317 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
0318 \
0319 *__addr = __val; \
0320 } \
0321 \
0322 static inline type pfx##in##bwlq##p(unsigned long port) \
0323 { \
0324 volatile type *__addr; \
0325 type __val; \
0326 \
0327 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
0328 \
0329 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
0330 \
0331 if (barrier) \
0332 iobarrier_rw(); \
0333 \
0334 __val = *__addr; \
0335 \
0336 \
0337 if (!relax) \
0338 rmb(); \
0339 return pfx##ioswab##bwlq(__addr, __val); \
0340 }
0341
0342 #define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
0343 \
0344 __BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
0345
0346 #define BUILDIO_MEM(bwlq, type) \
0347 \
0348 __BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
0349 __BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
0350 __BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
0351 __BUILD_MEMORY_PFX(, bwlq, type, 0)
0352
0353 BUILDIO_MEM(b, u8)
0354 BUILDIO_MEM(w, u16)
0355 BUILDIO_MEM(l, u32)
0356 #ifdef CONFIG_64BIT
0357 BUILDIO_MEM(q, u64)
0358 #else
0359 __BUILD_MEMORY_PFX(__raw_, q, u64, 0)
0360 __BUILD_MEMORY_PFX(__mem_, q, u64, 0)
0361 #endif
0362
0363 #define __BUILD_IOPORT_PFX(bus, bwlq, type) \
0364 __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
0365 __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
0366
0367 #define BUILDIO_IOPORT(bwlq, type) \
0368 __BUILD_IOPORT_PFX(, bwlq, type) \
0369 __BUILD_IOPORT_PFX(__mem_, bwlq, type)
0370
0371 BUILDIO_IOPORT(b, u8)
0372 BUILDIO_IOPORT(w, u16)
0373 BUILDIO_IOPORT(l, u32)
0374 #ifdef CONFIG_64BIT
0375 BUILDIO_IOPORT(q, u64)
0376 #endif
0377
0378 #define __BUILDIO(bwlq, type) \
0379 \
0380 __BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
0381
0382 __BUILDIO(q, u64)
0383
0384 #define readb_relaxed __relaxed_readb
0385 #define readw_relaxed __relaxed_readw
0386 #define readl_relaxed __relaxed_readl
0387 #ifdef CONFIG_64BIT
0388 #define readq_relaxed __relaxed_readq
0389 #endif
0390
0391 #define writeb_relaxed __relaxed_writeb
0392 #define writew_relaxed __relaxed_writew
0393 #define writel_relaxed __relaxed_writel
0394 #ifdef CONFIG_64BIT
0395 #define writeq_relaxed __relaxed_writeq
0396 #endif
0397
0398 #define readb_be(addr) \
0399 __raw_readb((__force unsigned *)(addr))
0400 #define readw_be(addr) \
0401 be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
0402 #define readl_be(addr) \
0403 be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
0404 #define readq_be(addr) \
0405 be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
0406
0407 #define writeb_be(val, addr) \
0408 __raw_writeb((val), (__force unsigned *)(addr))
0409 #define writew_be(val, addr) \
0410 __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
0411 #define writel_be(val, addr) \
0412 __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
0413 #define writeq_be(val, addr) \
0414 __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
0415
0416
0417
0418
0419 #ifdef CONFIG_64BIT
0420 #define readq readq
0421 #define writeq writeq
0422 #endif
0423
0424 #define __BUILD_MEMORY_STRING(bwlq, type) \
0425 \
0426 static inline void writes##bwlq(volatile void __iomem *mem, \
0427 const void *addr, unsigned int count) \
0428 { \
0429 const volatile type *__addr = addr; \
0430 \
0431 while (count--) { \
0432 __mem_write##bwlq(*__addr, mem); \
0433 __addr++; \
0434 } \
0435 } \
0436 \
0437 static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
0438 unsigned int count) \
0439 { \
0440 volatile type *__addr = addr; \
0441 \
0442 while (count--) { \
0443 *__addr = __mem_read##bwlq(mem); \
0444 __addr++; \
0445 } \
0446 }
0447
0448 #define __BUILD_IOPORT_STRING(bwlq, type) \
0449 \
0450 static inline void outs##bwlq(unsigned long port, const void *addr, \
0451 unsigned int count) \
0452 { \
0453 const volatile type *__addr = addr; \
0454 \
0455 while (count--) { \
0456 __mem_out##bwlq(*__addr, port); \
0457 __addr++; \
0458 } \
0459 } \
0460 \
0461 static inline void ins##bwlq(unsigned long port, void *addr, \
0462 unsigned int count) \
0463 { \
0464 volatile type *__addr = addr; \
0465 \
0466 while (count--) { \
0467 *__addr = __mem_in##bwlq(port); \
0468 __addr++; \
0469 } \
0470 }
0471
0472 #define BUILDSTRING(bwlq, type) \
0473 \
0474 __BUILD_MEMORY_STRING(bwlq, type) \
0475 __BUILD_IOPORT_STRING(bwlq, type)
0476
0477 BUILDSTRING(b, u8)
0478 BUILDSTRING(w, u16)
0479 BUILDSTRING(l, u32)
0480 #ifdef CONFIG_64BIT
0481 BUILDSTRING(q, u64)
0482 #endif
0483
0484 static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
0485 {
0486 memset((void __force *) addr, val, count);
0487 }
0488 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
0489 {
0490 memcpy(dst, (void __force *) src, count);
0491 }
0492 static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
0493 {
0494 memcpy((void __force *) dst, src, count);
0495 }
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517 #ifdef CONFIG_DMA_NONCOHERENT
0518
0519 extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
0520 extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
0521 extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
0522
0523 #define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
0524 #define dma_cache_wback(start, size) _dma_cache_wback(start, size)
0525 #define dma_cache_inv(start, size) _dma_cache_inv(start, size)
0526
0527 #else
0528
0529 #define dma_cache_wback_inv(start,size) \
0530 do { (void) (start); (void) (size); } while (0)
0531 #define dma_cache_wback(start,size) \
0532 do { (void) (start); (void) (size); } while (0)
0533 #define dma_cache_inv(start,size) \
0534 do { (void) (start); (void) (size); } while (0)
0535
0536 #endif
0537
0538
0539
0540
0541
0542
0543 #ifdef __MIPSEB__
0544 #define __CSR_32_ADJUST 4
0545 #else
0546 #define __CSR_32_ADJUST 0
0547 #endif
0548
0549 #define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
0550 #define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
0551
0552
0553
0554
0555
0556 #define xlate_dev_mem_ptr(p) __va(p)
0557
0558 void __ioread64_copy(void *to, const void __iomem *from, size_t count);
0559
0560 #endif