// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * I/O string operations
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *    Copyright (C) 2006 IBM Corporation
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * Rewritten in C by Stephen Rothwell.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/export.h>

#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>
/*
 * See definition in io.h.
 * NOTE(review): presumably flags a platform where ISA I/O space needs
 * special (non-standard) handling — confirm against asm/io.h.
 */
bool isa_io_special;
0026 
/*
 * _insb - read @count bytes from the single MMIO location @port into @buf.
 *
 * The leading "sync" orders these device reads after all prior storage
 * accesses; eieio() between iterations keeps the per-byte port reads in
 * program order.  The trailing "twi 0,%0,0; isync" creates a data
 * dependency on the last value loaded and then waits, so the final read
 * has actually completed before execution continues (the standard PPC
 * I/O-access completion idiom).
 */
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
    u8 *tbuf = buf;
    u8 tmp;

    /* Nothing to do for zero or negative counts. */
    if (unlikely(count <= 0))
        return;
    asm volatile("sync");
    do {
        tmp = *port;
        eieio();
        *tbuf++ = tmp;
    } while (--count != 0);
    asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insb);
0043 
/*
 * _outsb - write @count bytes from @buf to the single MMIO location @port.
 *
 * "sync" before the loop orders the device writes after prior accesses;
 * "sync" after the loop ensures all the port stores are pushed out
 * before any subsequent storage access.
 */
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
    const u8 *tbuf = buf;

    /* Nothing to do for zero or negative counts. */
    if (unlikely(count <= 0))
        return;
    asm volatile("sync");
    do {
        *port = *tbuf++;
    } while (--count != 0);
    asm volatile("sync");
}
EXPORT_SYMBOL(_outsb);
0057 
/*
 * _insw_ns - read @count 16-bit words from @port into @buf, with no
 * byte swapping ("_ns" = native/no-swap variant).
 *
 * Barrier scheme is identical to _insb(): "sync" before the first read,
 * eieio() to keep the port reads ordered, and "twi 0,%0,0; isync" to
 * wait for the final load to complete.
 */
void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
    u16 *tbuf = buf;
    u16 tmp;

    /* Nothing to do for zero or negative counts. */
    if (unlikely(count <= 0))
        return;
    asm volatile("sync");
    do {
        tmp = *port;
        eieio();
        *tbuf++ = tmp;
    } while (--count != 0);
    asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insw_ns);
0074 
/*
 * _outsw_ns - write @count 16-bit words from @buf to @port, with no
 * byte swapping.  Same sync/…/sync bracketing as _outsb().
 */
void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
    const u16 *tbuf = buf;

    /* Nothing to do for zero or negative counts. */
    if (unlikely(count <= 0))
        return;
    asm volatile("sync");
    do {
        *port = *tbuf++;
    } while (--count != 0);
    asm volatile("sync");
}
EXPORT_SYMBOL(_outsw_ns);
0088 
/*
 * _insl_ns - read @count 32-bit words from @port into @buf, with no
 * byte swapping.
 *
 * Barrier scheme is identical to _insb(): "sync" before the first read,
 * eieio() to keep the port reads ordered, and "twi 0,%0,0; isync" to
 * wait for the final load to complete.
 */
void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
    u32 *tbuf = buf;
    u32 tmp;

    /* Nothing to do for zero or negative counts. */
    if (unlikely(count <= 0))
        return;
    asm volatile("sync");
    do {
        tmp = *port;
        eieio();
        *tbuf++ = tmp;
    } while (--count != 0);
    asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insl_ns);
0105 
/*
 * _outsl_ns - write @count 32-bit words from @buf to @port, with no
 * byte swapping.  Same sync/…/sync bracketing as _outsb().
 */
void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
    const u32 *tbuf = buf;

    /* Nothing to do for zero or negative counts. */
    if (unlikely(count <= 0))
        return;
    asm volatile("sync");
    do {
        *port = *tbuf++;
    } while (--count != 0);
    asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);
0119 
0120 #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
0121 
/*
 * _memset_io - fill @n bytes of MMIO space at @addr with byte value @c.
 *
 * Works byte-at-a-time until @addr is 4-byte aligned, then stores the
 * replicated 32-bit pattern word-at-a-time, and finishes any remaining
 * tail bytes.  "sync" fences bracket the whole operation so the device
 * stores are ordered against surrounding accesses.  notrace: must be
 * callable from contexts where function tracing is unsafe.
 */
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
    void *p = (void __force *)addr;
    u32 lc = c;
    /* Replicate the low byte of c into all four bytes of lc. */
    lc |= lc << 8;
    lc |= lc << 16;

    __asm__ __volatile__ ("sync" : : : "memory");
    /* Lead-in: single bytes until the pointer is 4-byte aligned. */
    while(n && !IO_CHECK_ALIGN(p, 4)) {
        *((volatile u8 *)p) = c;
        p++;
        n--;
    }
    /* Bulk: aligned 32-bit stores of the replicated pattern. */
    while(n >= 4) {
        *((volatile u32 *)p) = lc;
        p += 4;
        n -= 4;
    }
    /* Tail: remaining 1-3 bytes. */
    while(n) {
        *((volatile u8 *)p) = c;
        p++;
        n--;
    }
    __asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memset_io);
0149 
/*
 * _memcpy_fromio - copy @n bytes from MMIO space @src to normal memory
 * @dest.
 *
 * Copies byte-at-a-time until BOTH pointers are 4-byte aligned, then in
 * 32-bit words, then any tail bytes.  eieio() after every device read
 * keeps the MMIO loads in program order; "sync" fences bracket the
 * whole copy.
 */
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
            unsigned long n)
{
    void *vsrc = (void __force *) src;

    __asm__ __volatile__ ("sync" : : : "memory");
    /* Lead-in: bytes until both src and dest are 4-byte aligned. */
    while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
        *((u8 *)dest) = *((volatile u8 *)vsrc);
        eieio();
        vsrc++;
        dest++;
        n--;
    }
    /* Bulk: aligned 32-bit loads from the device. */
    while(n >= 4) {
        *((u32 *)dest) = *((volatile u32 *)vsrc);
        eieio();
        vsrc += 4;
        dest += 4;
        n -= 4;
    }
    /* Tail: remaining 1-3 bytes. */
    while(n) {
        *((u8 *)dest) = *((volatile u8 *)vsrc);
        eieio();
        vsrc++;
        dest++;
        n--;
    }
    __asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_fromio);
0180 
/*
 * _memcpy_toio - copy @n bytes from normal memory @src to MMIO space
 * @dest.
 *
 * Copies byte-at-a-time until BOTH pointers are 4-byte aligned, then in
 * 32-bit words, then any tail bytes.  No per-store eieio() is needed on
 * the write path; the bracketing "sync" fences order the device stores
 * against surrounding accesses.
 */
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
    void *vdest = (void __force *) dest;

    __asm__ __volatile__ ("sync" : : : "memory");
    /* Lead-in: bytes until both src and dest are 4-byte aligned. */
    while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
        *((volatile u8 *)vdest) = *((u8 *)src);
        src++;
        vdest++;
        n--;
    }
    /* Bulk: aligned 32-bit stores to the device. */
    while(n >= 4) {
        *((volatile u32 *)vdest) = *((volatile u32 *)src);
        src += 4;
        vdest += 4;
        n-=4;
    }
    /* Tail: remaining 1-3 bytes. */
    while(n) {
        *((volatile u8 *)vdest) = *((u8 *)src);
        src++;
        vdest++;
        n--;
    }
    __asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_toio);