0001
0002
0003
0004
0005 #ifndef __RTW_HCI_H__
0006 #define __RTW_HCI_H__
0007
0008
/* Host-controller-interface operations vtable.
 *
 * One implementation is supplied per transport backend; the core driver
 * never calls these pointers directly but goes through the static inline
 * wrappers defined below in this header.
 */
struct rtw_hci_ops {
	/* hand one frame (skb plus its tx descriptor info) to the backend */
	int (*tx_write)(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb);
	/* start transmission of frames previously queued with tx_write */
	void (*tx_kick_off)(struct rtw_dev *rtwdev);
	/* optional (may be NULL): flush, and optionally drop, the frames
	 * pending on the queues selected by the @queues bitmap */
	void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop);
	int (*setup)(struct rtw_dev *rtwdev);
	int (*start)(struct rtw_dev *rtwdev);
	void (*stop)(struct rtw_dev *rtwdev);
	/* enter (true) or leave (false) deep power save */
	void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
	/* enter (true) or leave (false) link power save */
	void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*interface_cfg)(struct rtw_dev *rtwdev);

	/* download @size bytes from @buf to the reserved-page / H2C areas */
	int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
	int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);

	/* raw register accessors; addr is a register offset */
	u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
	u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
	u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
	void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
	void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
	void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};
0032
0033 static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
0034 struct rtw_tx_pkt_info *pkt_info,
0035 struct sk_buff *skb)
0036 {
0037 return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
0038 }
0039
0040 static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
0041 {
0042 return rtwdev->hci.ops->tx_kick_off(rtwdev);
0043 }
0044
0045 static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
0046 {
0047 return rtwdev->hci.ops->setup(rtwdev);
0048 }
0049
0050 static inline int rtw_hci_start(struct rtw_dev *rtwdev)
0051 {
0052 return rtwdev->hci.ops->start(rtwdev);
0053 }
0054
0055 static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
0056 {
0057 rtwdev->hci.ops->stop(rtwdev);
0058 }
0059
0060 static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
0061 {
0062 rtwdev->hci.ops->deep_ps(rtwdev, enter);
0063 }
0064
0065 static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
0066 {
0067 rtwdev->hci.ops->link_ps(rtwdev, enter);
0068 }
0069
0070 static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
0071 {
0072 rtwdev->hci.ops->interface_cfg(rtwdev);
0073 }
0074
0075 static inline int
0076 rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
0077 {
0078 return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
0079 }
0080
0081 static inline int
0082 rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
0083 {
0084 return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
0085 }
0086
0087 static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
0088 {
0089 return rtwdev->hci.ops->read8(rtwdev, addr);
0090 }
0091
0092 static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
0093 {
0094 return rtwdev->hci.ops->read16(rtwdev, addr);
0095 }
0096
0097 static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
0098 {
0099 return rtwdev->hci.ops->read32(rtwdev, addr);
0100 }
0101
0102 static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
0103 {
0104 rtwdev->hci.ops->write8(rtwdev, addr, val);
0105 }
0106
0107 static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
0108 {
0109 rtwdev->hci.ops->write16(rtwdev, addr, val);
0110 }
0111
0112 static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
0113 {
0114 rtwdev->hci.ops->write32(rtwdev, addr, val);
0115 }
0116
0117 static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
0118 {
0119 u8 val;
0120
0121 val = rtw_read8(rtwdev, addr);
0122 rtw_write8(rtwdev, addr, val | bit);
0123 }
0124
0125 static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
0126 {
0127 u16 val;
0128
0129 val = rtw_read16(rtwdev, addr);
0130 rtw_write16(rtwdev, addr, val | bit);
0131 }
0132
0133 static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
0134 {
0135 u32 val;
0136
0137 val = rtw_read32(rtwdev, addr);
0138 rtw_write32(rtwdev, addr, val | bit);
0139 }
0140
0141 static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
0142 {
0143 u8 val;
0144
0145 val = rtw_read8(rtwdev, addr);
0146 rtw_write8(rtwdev, addr, val & ~bit);
0147 }
0148
0149 static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
0150 {
0151 u16 val;
0152
0153 val = rtw_read16(rtwdev, addr);
0154 rtw_write16(rtwdev, addr, val & ~bit);
0155 }
0156
0157 static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
0158 {
0159 u32 val;
0160
0161 val = rtw_read32(rtwdev, addr);
0162 rtw_write32(rtwdev, addr, val & ~bit);
0163 }
0164
/* Read an RF register field via the chip's read_rf op.
 *
 * rf_lock serializes RF register access against concurrent writers
 * (see rtw_write_rf() below); the irqsave variant is used, which
 * suggests callers may hold it from IRQ-disabled context — confirm
 * against the call sites.
 */
static inline u32
rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	    u32 addr, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&rtwdev->rf_lock, flags);
	val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
	spin_unlock_irqrestore(&rtwdev->rf_lock, flags);

	return val;
}
0178
/* Write @data into the RF register field selected by @addr/@mask on
 * path @rf_path, via the chip's write_rf op.
 *
 * Takes rf_lock for the duration of the op so RF accesses are
 * serialized against rtw_read_rf() and other writers.
 */
static inline void
rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	     u32 addr, u32 mask, u32 data)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwdev->rf_lock, flags);
	rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
	spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
}
0189
0190 static inline u32
0191 rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
0192 {
0193 u32 shift = __ffs(mask);
0194 u32 orig;
0195 u32 ret;
0196
0197 orig = rtw_read32(rtwdev, addr);
0198 ret = (orig & mask) >> shift;
0199
0200 return ret;
0201 }
0202
0203 static inline u16
0204 rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
0205 {
0206 u32 shift = __ffs(mask);
0207 u32 orig;
0208 u32 ret;
0209
0210 orig = rtw_read16(rtwdev, addr);
0211 ret = (orig & mask) >> shift;
0212
0213 return ret;
0214 }
0215
0216 static inline u8
0217 rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
0218 {
0219 u32 shift = __ffs(mask);
0220 u32 orig;
0221 u32 ret;
0222
0223 orig = rtw_read8(rtwdev, addr);
0224 ret = (orig & mask) >> shift;
0225
0226 return ret;
0227 }
0228
0229 static inline void
0230 rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
0231 {
0232 u32 shift = __ffs(mask);
0233 u32 orig;
0234 u32 set;
0235
0236 WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);
0237
0238 orig = rtw_read32(rtwdev, addr);
0239 set = (orig & ~mask) | ((data << shift) & mask);
0240 rtw_write32(rtwdev, addr, set);
0241 }
0242
0243 static inline void
0244 rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
0245 {
0246 u32 shift;
0247 u8 orig, set;
0248
0249 mask &= 0xff;
0250 shift = __ffs(mask);
0251
0252 orig = rtw_read8(rtwdev, addr);
0253 set = (orig & ~mask) | ((data << shift) & mask);
0254 rtw_write8(rtwdev, addr, set);
0255 }
0256
/* Return which HCI transport backs this device (see enum rtw_hci_type). */
static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.type;
}
0261
0262 static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues,
0263 bool drop)
0264 {
0265 if (rtwdev->hci.ops->flush_queues)
0266 rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
0267 }
0268
0269 static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
0270 {
0271 if (rtwdev->hci.ops->flush_queues)
0272 rtwdev->hci.ops->flush_queues(rtwdev,
0273 BIT(rtwdev->hw->queues) - 1,
0274 drop);
0275 }
0276
0277 #endif