/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IO_H
#define __ASM_IO_H

#include <linux/types.h>
#include <linux/pgtable.h>

#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/memory.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Generic IO read/write.  These perform native-endian accesses.
 */
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
    asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
    asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_writel __raw_writel
static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
    asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
    asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
    u8 val;
    asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
                 "ldarb %w0, [%1]",
                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
             : "=r" (val) : "r" (addr));
    return val;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
    u16 val;

    asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
                 "ldarh %w0, [%1]",
                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
             : "=r" (val) : "r" (addr));
    return val;
}

#define __raw_readl __raw_readl
static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
{
    u32 val;
    asm volatile(ALTERNATIVE("ldr %w0, [%1]",
                 "ldar %w0, [%1]",
                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
             : "=r" (val) : "r" (addr));
    return val;
}

#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
    u64 val;
    asm volatile(ALTERNATIVE("ldr %0, [%1]",
                 "ldar %0, [%1]",
                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
             : "=r" (val) : "r" (addr));
    return val;
}
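
/*
 * Illustrative usage sketch, not part of this header: drivers normally
 * reach these raw accessors through readl()/writel() and friends from
 * <asm-generic/io.h>, which wrap them with the ordering macros defined
 * below. The base pointer and register offsets here are hypothetical.
 *
 *	void __iomem *base = ioremap(phys, SZ_4K);
 *	u32 status;
 *
 *	writel(BIT(0), base + 0x04);	// __io_bw(); __raw_writel(...)
 *	status = readl(base + 0x00);	// __raw_readl(...); __io_ar(status)
 */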

/* IO barriers */
#define __io_ar(v)                          \
({                                  \
    unsigned long tmp;                      \
                                    \
    dma_rmb();                              \
                                    \
    /*                              \
     * Create a dummy control dependency from the IO read to any    \
     * later instructions. This ensures that a subsequent call to   \
     * udelay() will be ordered due to the ISB in get_cycles(). \
     */                             \
    asm volatile("eor   %0, %1, %1\n"               \
             "cbnz  %0, ."                  \
             : "=r" (tmp) : "r" ((unsigned long)(v))        \
             : "memory");                   \
})
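
/*
 * Illustrative sketch (hypothetical device registers): the dummy
 * control dependency above is what makes the common "kick the device,
 * read back, then delay" pattern safe, because the ISB in get_cycles()
 * cannot complete before the read has returned its value:
 *
 *	writel(CMD_RESET, base + REG_CMD);
 *	readl(base + REG_CMD);		// read must complete here...
 *	udelay(10);			// ...before the delay starts ticking
 */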

#define __io_bw()       dma_wmb()
#define __io_br(v)
#define __io_aw(v)

/* arm64-specific, don't use in portable drivers */
#define __iormb(v)      __io_ar(v)
#define __iowmb()       __io_bw()
#define __iomb()        dma_mb()

/*
 *  I/O port access primitives.
 */
#define arch_has_dev_port() (1)
#define IO_SPACE_LIMIT      (PCI_IO_SIZE - 1)
#define PCI_IOBASE      ((void __iomem *)PCI_IO_START)
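
/*
 * Illustrative sketch: arm64 has no separate port-I/O address space, so
 * inb()/outb() from <asm-generic/io.h> become MMIO accesses inside the
 * PCI_IOBASE window. The port number below is hypothetical.
 *
 *	u8 v = inb(0x60);	// roughly __raw_readb(PCI_IOBASE + 0x60) plus barriers
 */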

/*
 * String version of I/O memory access operations.
 */
extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void __memset_io(volatile void __iomem *, int, size_t);

#define memset_io(c,v,l)    __memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l)    __memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l)  __memcpy_toio((c),(a),(l))
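
/*
 * Illustrative sketch (hypothetical buffer and offset): these helpers
 * move whole buffers across the __iomem boundary without mixing the
 * two pointer address spaces in open-coded loops:
 *
 *	u8 buf[64];
 *
 *	memcpy_fromio(buf, base + 0x100, sizeof(buf));	// device -> RAM
 *	memset_io(base + 0x100, 0, sizeof(buf));	// clear device window
 */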

/*
 * I/O memory mapping functions.
 */

bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot);
#define ioremap_allowed ioremap_allowed

#define _PAGE_IOREMAP PROT_DEVICE_nGnRE

#define ioremap_wc(addr, size)  \
    ioremap_prot((addr), (size), PROT_NORMAL_NC)
#define ioremap_np(addr, size)  \
    ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
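
/*
 * Illustrative sketch: the variant chosen picks the memory attributes
 * of the mapping. The physical addresses below are hypothetical.
 *
 *	regs = ioremap(0x40000000, SZ_4K);	// Device-nGnRE (default)
 *	fb   = ioremap_wc(0x40100000, SZ_1M);	// Normal-NC, write-combining
 */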

/*
 * io{read,write}{16,32,64}be() macros
 */
#define ioread16be(p)       ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
#define ioread32be(p)       ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
#define ioread64be(p)       ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })

#define iowrite16be(v,p)    ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)    ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
#define iowrite64be(v,p)    ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
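
/*
 * Illustrative sketch (hypothetical register): for a big-endian device
 * register the byte swap is folded into the accessor, so the value on
 * the bus is MSB-first regardless of CPU endianness:
 *
 *	iowrite32be(0x12345678, base + 0x08);	// bus bytes: 12 34 56 78
 *	v = ioread32be(base + 0x08);		// v == 0x12345678 again
 */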

#include <asm-generic/io.h>

#define ioremap_cache ioremap_cache
static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
{
    if (pfn_is_map_memory(__phys_to_pfn(addr)))
        return (void __iomem *)__phys_to_virt(addr);

    return ioremap_prot(addr, size, PROT_NORMAL);
}
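
/*
 * Illustrative sketch (hypothetical firmware-table address): when the
 * pfn is ordinary mapped RAM, ioremap_cache() above simply returns the
 * existing linear-map alias instead of creating a new mapping:
 *
 *	void __iomem *tbl = ioremap_cache(table_phys, table_len);
 */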

/*
 * More restrictive address range checking than the default implementation
 * (PHYS_OFFSET and PHYS_MASK taken into account).
 */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                    unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap

#endif  /* __ASM_IO_H */