#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN			4

#define MT_FCE_DMA_ADDR			0x0230
#define MT_FCE_DMA_LEN			0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8

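/*
 * Decode the register values carried in an MCU read reply. In burst
 * mode the payload is a flat array of 32-bit values starting at the
 * first requested register; in random-read mode it is a sequence of
 * (register, value) pairs that must match the requested registers.
 */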
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}

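/*
 * Poll the command-response bulk endpoint until an EVT_CMD_DONE event
 * carrying the expected sequence number arrives. Register read replies
 * (when usb->mcu.rp is set) are decoded from the payload along the way.
 * Gives up after five attempts or on a non-timeout USB error.
 */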
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	u8 *data = usb->mcu.data;
	int i, len, ret;
	u32 rxfce;

	for (i = 0; i < 5; i++) {
		ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
				     300, MT_EP_IN_CMD_RESP);
		if (ret == -ETIMEDOUT)
			continue;
		if (ret)
			goto out;

		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);

		rxfce = get_unaligned_le32(data);
		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}
out:
	dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
	return ret;
}

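/*
 * Push one in-band MCU command over the bulk-out command endpoint and,
 * if requested, wait for the matching EVT_CMD_DONE event. A non-zero
 * 4-bit sequence number is allocated only when a response is expected.
 * Callers must hold dev->mcu.mutex.
 */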
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	u8 seq = 0;
	u32 info;
	int ret;

	if (test_bit(MT76_REMOVED, &dev->phy.state)) {
		ret = 0;
		goto out;
	}

	if (wait_resp) {
		seq = ++dev->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
			     MT_EP_OUT_INBAND_CMD);
	if (ret)
		goto out;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

out:
	consume_skb(skb);

	return ret;
}

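/*
 * mcu_send_msg hook: wrap the payload in an skb and send it as a single
 * in-band command under the MCU mutex.
 */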
static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
		      int len, bool wait_resp)
{
	struct sk_buff *skb;
	int err;

	skb = mt76_mcu_msg_alloc(dev, data, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&dev->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&dev->mcu.mutex);

	return err;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

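/*
 * Write a list of (register, value) pairs through the CMD_RANDOM_WRITE
 * in-band command, splitting the list into chunks that fit a single
 * in-band packet. Only the final chunk waits for a completion event.
 */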
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	const int CMD_RANDOM_WRITE = 12;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);
	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	mutex_unlock(&dev->mcu.mutex);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

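/*
 * Read back a list of registers via the CMD_RANDOM_READ in-band
 * command. The whole request must fit in one in-band packet; the reply
 * is decoded into @data by mt76x02u_mcu_wait_resp() through the
 * usb->mcu.rp pointer set up here.
 */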
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&dev->mcu.mutex);

	return ret;
}

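/* Reset the MCU firmware state via the MT_VEND_DEV_MODE vendor request. */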
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

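/*
 * Upload one firmware fragment: prepend the DMA info header, program
 * the FCE destination address and length registers, push the padded
 * payload over the in-band command endpoint and bump the FCE CPU
 * descriptor index.
 */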
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
			    const void *fw_data, int len, u32 dst_addr)
{
	__le32 info;
	u32 val;
	int err, data_len;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	data_len = MT_CMD_HDR_LEN + len + sizeof(info);

	err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
			     MT_EP_OUT_INBAND_CMD);
	if (err) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
		return err;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

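/*
 * Stream a firmware image to the device in fragments of at most
 * max_payload - 8 bytes (leaving room for the DMA header and padding),
 * pausing briefly between fragments.
 */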
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int len, err = 0, pos = 0, max_len = max_payload - 8;
	u8 *buf;

	buf = kmalloc(max_payload, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	kfree(buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

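/*
 * Hook the USB in-band MCU operations into the mt76 core. The headroom
 * and tailroom values reserve room for the DMA command header and the
 * trailing padding added when a command is pushed out.
 */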
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.headroom = MT_CMD_HDR_LEN,
		.tailroom = 8,
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_parse_response = mt76x02_mcu_parse_response,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");