#ifndef EFX_IO_H
#define EFX_IO_H

#include <linux/io.h>
#include <linux/spinlock.h>

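/* NIC register and SRAM I/O helpers.  128-bit and 64-bit values are built
 * from narrower MMIO accesses where necessary.
 */

/* Use single 64-bit MMIO accesses for the halves of 128-bit registers where
 * the architecture supports them; otherwise fall back to 32-bit accesses.
 */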
#if BITS_PER_LONG == 64
#define EFX_USE_QWORD_IO 1
#endif

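/* PIO (EFX_USE_PIO) is only enabled on x86-64, and only when write-combining
 * mappings are available (ARCH_HAS_IOREMAP_WC).
 */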
#ifdef CONFIG_X86_64
#ifdef ARCH_HAS_IOREMAP_WC
#define EFX_USE_PIO 1
#endif
#endif

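/* Full register offset: the NIC's register base plus the per-register offset */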
static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
{
	return efx->reg_base + reg;
}

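/* Raw 64-bit MMIO accessors, built only when 64-bit accesses are available.
 * These do no byte-swapping and give no ordering guarantees beyond those of
 * __raw_writeq()/__raw_readq().
 */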
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
			       unsigned int reg)
{
	__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif

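/* Raw 32-bit MMIO accessors, also used to build the wider accesses below */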
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
			       unsigned int reg)
{
	__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le32)__raw_readl(efx->membase + reg);
}

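/* Write a 128-bit (oword) register.  The biu_lock is held so that the
 * component qword/dword writes are not interleaved with other locked
 * accesses.
 */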
static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
			      unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}

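/* Write a 64-bit value to SRAM through the supplied mapping, holding the
 * biu_lock across the (possibly split) write.
 */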
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
				   const efx_qword_t *value, unsigned int index)
{
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing SRAM address %x with " EFX_QWORD_FMT "\n",
		   addr, EFX_QWORD_VAL(*value));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
	__raw_writel((__force u32)value->u32[0], membase + addr);
	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}

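/* Write a 32-bit (dword) register.  This is a single MMIO write, so no
 * locking is needed.
 */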
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
			      unsigned int reg)
{
	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_DWORD_FMT "\n",
		   reg, EFX_DWORD_VAL(*value));

	_efx_writed(efx, value->u32[0], reg);
}

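/* Read a 128-bit (oword) register, holding the biu_lock so the component
 * dword reads are not interleaved with other locked accesses.
 */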
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
			     unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	spin_lock_irqsave(&efx->biu_lock, flags);
	value->u32[0] = _efx_readd(efx, reg + 0);
	value->u32[1] = _efx_readd(efx, reg + 4);
	value->u32[2] = _efx_readd(efx, reg + 8);
	value->u32[3] = _efx_readd(efx, reg + 12);
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));
}

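/* Read a 64-bit value from SRAM through the supplied mapping, holding the
 * biu_lock across the (possibly split) read.
 */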
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
				  efx_qword_t *value, unsigned int index)
{
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from SRAM address %x, got " EFX_QWORD_FMT "\n",
		   addr, EFX_QWORD_VAL(*value));
}

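/* Read a 32-bit (dword) register; a single MMIO read, no locking needed */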
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
			     unsigned int reg)
{
	value->u32[0] = _efx_readd(efx, reg);
	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_DWORD_FMT "\n",
		   reg, EFX_DWORD_VAL(*value));
}

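/* Write a 128-bit register that forms part of a table of registers */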
static inline void
efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
		 unsigned int reg, unsigned int index)
{
	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}

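/* Read a 128-bit register that forms part of a table of registers */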
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
				   unsigned int reg, unsigned int index)
{
	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}

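/* Default stride between per-VI register pages: 0x2000 (8KB), with EF100
 * using a larger 0x10000 (64KB) stride.
 */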
#define EFX_DEFAULT_VI_STRIDE 0x2000
#define EF100_DEFAULT_VI_STRIDE 0x10000

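/* Offset of a register within a given page (VI) */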
static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
					 unsigned int reg)
{
	return page * efx->vi_stride + reg;
}

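/* Write a 128-bit page-mapped register without taking the biu_lock.  The
 * efx_writeo_page() wrapper restricts this to register offsets 0x830 and
 * 0xa10, enforced at build time.
 */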
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
				    unsigned int reg, unsigned int page)
{
	reg = efx_paged_reg(efx, page, reg);

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
}
#define efx_writeo_page(efx, value, reg, page)				\
	_efx_writeo_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
			 page)

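/* Write a 32-bit page-mapped register.  The efx_writed_page() wrapper
 * restricts this to a fixed set of register offsets, enforced at build time.
 */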
static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
		 unsigned int reg, unsigned int page)
{
	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
#define efx_writed_page(efx, value, reg, page)				\
	_efx_writed_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
					   (reg) != 0x200 &&		\
					   (reg) != 0x400 &&		\
					   (reg) != 0x420 &&		\
					   (reg) != 0x830 &&		\
					   (reg) != 0x83c &&		\
					   (reg) != 0xa18 &&		\
					   (reg) != 0xa1c),		\
			 page)

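/* Write a 32-bit page-mapped register, taking the biu_lock only for page 0;
 * writes to other pages are issued without locking.  The wrapper macro
 * restricts this to register offset 0x420, enforced at build time.
 */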
static inline void _efx_writed_page_locked(struct efx_nic *efx,
					   const efx_dword_t *value,
					   unsigned int reg,
					   unsigned int page)
{
	unsigned long flags __attribute__ ((unused));

	if (page == 0) {
		spin_lock_irqsave(&efx->biu_lock, flags);
		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
		spin_unlock_irqrestore(&efx->biu_lock, flags);
	} else {
		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
	}
}
#define efx_writed_page_locked(efx, value, reg, page)			\
	_efx_writed_page_locked(efx, value,				\
				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
				page)

#endif /* EFX_IO_H */