/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 */

#ifndef _ASM_POWERPC_DCR_NATIVE_H
#define _ASM_POWERPC_DCR_NATIVE_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
#include <linux/stringify.h>

typedef struct {
    unsigned int base;
} dcr_host_native_t;

static inline bool dcr_map_ok_native(dcr_host_native_t host)
{
    return true;
}

#define dcr_map_native(dev, dcr_n, dcr_c) \
    ((dcr_host_native_t){ .base = (dcr_n) })
#define dcr_unmap_native(host, dcr_c)       do {} while (0)
#define dcr_read_native(host, dcr_n)        mfdcr(dcr_n + host.base)
#define dcr_write_native(host, dcr_n, value)    mtdcr(dcr_n + host.base, value)
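
/*
 * Example (illustrative only, not part of this header): a minimal sketch
 * of how a driver might use the native DCR host API above. The device
 * pointer and the base DCR number 0x80 are hypothetical. On native
 * platforms dcr_map_native() merely records the base and cannot fail,
 * which is why dcr_map_ok_native() always returns true.
 *
 *    dcr_host_native_t host;
 *    unsigned int status;
 *
 *    host = dcr_map_native(dev, 0x80, 2);    // base DCR 0x80, 2 regs
 *    if (!dcr_map_ok_native(host))
 *        return -ENODEV;                     // cannot happen natively
 *    status = dcr_read_native(host, 0);      // reads DCR 0x80 + 0
 *    dcr_write_native(host, 1, status);      // writes DCR 0x80 + 1
 *    dcr_unmap_native(host, 2);              // no-op on native
 */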

/* Table-based DCR accessors */
extern void __mtdcr(unsigned int reg, unsigned int val);
extern unsigned int __mfdcr(unsigned int reg);

/* mfdcrx/mtdcrx instruction-based accessors. We hand-code
 * the opcodes so as not to depend on newer binutils.
 */
static inline unsigned int mfdcrx(unsigned int reg)
{
    unsigned int ret;
    asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
             : "=r" (ret) : "r" (reg));
    return ret;
}

static inline void mtdcrx(unsigned int reg, unsigned int val)
{
    asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
             : : "r" (val), "r" (reg));
}
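
/*
 * Encoding note (illustrative): both accessors emit an X-form word with
 * primary opcode 31 and the extended opcode in the low bits (0x206 >> 1 =
 * 259 for mfdcrx, 0x306 >> 1 = 387 for mtdcrx). "%0 << 21" places the
 * RT/RS register number in bits 6-10 and "%1 << 16" places the register
 * holding the DCR number in the RA field (bits 11-15). For example, were
 * the compiler to pick r3 and r4, the read form would assemble to
 * 0x7c640206, i.e. "mfdcrx r3, r4".
 */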

#define mfdcr(rn)                       \
    ({unsigned int rval;                    \
    if (__builtin_constant_p(rn) && rn < 1024)      \
        asm volatile("mfdcr %0, %1" : "=r" (rval)   \
                  : "n" (rn));          \
    else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))  \
        rval = mfdcrx(rn);              \
    else                            \
        rval = __mfdcr(rn);             \
    rval;})

#define mtdcr(rn, v)                        \
do {                                \
    if (__builtin_constant_p(rn) && rn < 1024)      \
        asm volatile("mtdcr %0, %1"         \
                  : : "n" (rn), "r" (v));       \
    else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))  \
        mtdcrx(rn, v);                  \
    else                            \
        __mtdcr(rn, v);                 \
} while (0)
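
/*
 * Illustrative note: mfdcr()/mtdcr() pick the cheapest access path they
 * can. A compile-time-constant DCR number below 1024 is encoded as an
 * immediate operand of the mfdcr/mtdcr instruction itself; anything else
 * falls back at run time to the hand-coded indexed forms when
 * CPU_FTR_INDEXED_DCR is set, or to the out-of-line table-based
 * __mfdcr()/__mtdcr() helpers. For example (DCR numbers hypothetical):
 *
 *    mtdcr(0x0c, val);     // constant: single "mtdcr 0x0c, rN"
 *    val = mfdcr(dcrn);    // variable: mfdcrx() or __mfdcr()
 */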

/* Reads/writes of indirect DCRs use the standard naming conventions for DCRs */
extern spinlock_t dcr_ind_lock;

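/*
 * Indirect DCRs are reached through an address/data register pair: the
 * index of the target register is written to base_addr, then the value
 * is transferred through base_data. The two steps must not interleave
 * with another indirect access, hence dcr_ind_lock around each helper.
 */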
static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
{
    unsigned long flags;
    unsigned int val;

    spin_lock_irqsave(&dcr_ind_lock, flags);
    if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
        mtdcrx(base_addr, reg);
        val = mfdcrx(base_data);
    } else {
        __mtdcr(base_addr, reg);
        val = __mfdcr(base_data);
    }
    spin_unlock_irqrestore(&dcr_ind_lock, flags);
    return val;
}

static inline void __mtdcri(int base_addr, int base_data, int reg,
                unsigned val)
{
    unsigned long flags;

    spin_lock_irqsave(&dcr_ind_lock, flags);
    if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
        mtdcrx(base_addr, reg);
        mtdcrx(base_data, val);
    } else {
        __mtdcr(base_addr, reg);
        __mtdcr(base_data, val);
    }
    spin_unlock_irqrestore(&dcr_ind_lock, flags);
}

static inline void __dcri_clrset(int base_addr, int base_data, int reg,
                 unsigned clr, unsigned set)
{
    unsigned long flags;
    unsigned int val;

    spin_lock_irqsave(&dcr_ind_lock, flags);
    if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
        mtdcrx(base_addr, reg);
        val = (mfdcrx(base_data) & ~clr) | set;
        mtdcrx(base_data, val);
    } else {
        __mtdcr(base_addr, reg);
        val = (__mfdcr(base_data) & ~clr) | set;
        __mtdcr(base_data, val);
    }
    spin_unlock_irqrestore(&dcr_ind_lock, flags);
}
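
/*
 * Illustrative note: __dcri_clrset() is a read-modify-write, atomic with
 * respect to other indirect accesses: the bits in clr are cleared before
 * the bits in set are ORed in. E.g. with clr = 0x0000000f and
 * set = 0x00000003, a register holding 0x12345678 becomes 0x12345673.
 */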

#define mfdcri(base, reg)   __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \
                     DCRN_ ## base ## _CONFIG_DATA, \
                     reg)

#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \
                     DCRN_ ## base ## _CONFIG_DATA, \
                     reg, data)

#define dcri_clrset(base, reg, clr, set)    __dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR,    \
                                  DCRN_ ## base ## _CONFIG_DATA,    \
                                  reg, clr, set)
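
/*
 * Example (illustrative only): the macros above paste the base name into
 * a DCRN_<base>_CONFIG_ADDR/_CONFIG_DATA register pair. Assuming a
 * platform that defines such a pair for SDR0 (as 4xx platforms do in
 * asm/dcr-regs.h), with hypothetical register offsets:
 *
 *    val = mfdcri(SDR0, 0x20);              // read indirect reg 0x20
 *    mtdcri(SDR0, 0x20, val | 1);           // write it back
 *    dcri_clrset(SDR0, 0x21, 0xff, 0x80);   // RMW on reg 0x21
 */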

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_NATIVE_H */