#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"


#define MSGBUF_IOCTL_RESP_TIMEOUT               msecs_to_jiffies(2000)

#define MSGBUF_TYPE_GEN_STATUS                  0x1
#define MSGBUF_TYPE_RING_STATUS                 0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE            0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT      0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE            0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT      0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH             0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT       0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ                0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK            0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST          0xB
#define MSGBUF_TYPE_IOCTL_CMPLT                 0xC
#define MSGBUF_TYPE_EVENT_BUF_POST              0xD
#define MSGBUF_TYPE_WL_EVENT                    0xE
#define MSGBUF_TYPE_TX_POST                     0xF
#define MSGBUF_TYPE_TX_STATUS                   0x10
#define MSGBUF_TYPE_RXBUF_POST                  0x11
#define MSGBUF_TYPE_RX_CMPLT                    0x12
#define MSGBUF_TYPE_LPBK_DMAXFER                0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT          0x14

#define NR_TX_PKTIDS                            2048
#define NR_RX_PKTIDS                            1024

#define BRCMF_IOCTL_REQ_PKTID                   0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE               2048
#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE           8192
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD        32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST      8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST          8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3      0x01
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11     0x02
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK       0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT       5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1              32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2              96

#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS        96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS      32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS         48

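/*
 * All structures below are exchanged with the firmware over DMA-coherent
 * ring memory. Multi-byte fields are little-endian on the wire, hence the
 * __le16/__le32 types and the cpu_to_le*()/le*_to_cpu() conversions
 * throughout this file.
 */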

/* DMA address of a host buffer, split into low/high 32-bit halves; needed
 * by the *_buf_addr fields below (definition restored, it was missing from
 * this listing).
 */
struct msgbuf_buf_addr {
        __le32 low_addr;
        __le32 high_addr;
};

struct msgbuf_common_hdr {
        u8 msgtype;
        u8 ifidx;
        u8 flags;
        u8 rsvd0;
        __le32 request_id;
};

struct msgbuf_ioctl_req_hdr {
        struct msgbuf_common_hdr msg;
        __le32 cmd;
        __le16 trans_id;
        __le16 input_buf_len;
        __le16 output_buf_len;
        __le16 rsvd0[3];
        struct msgbuf_buf_addr req_buf_addr;
        __le32 rsvd1[2];
};

struct msgbuf_tx_msghdr {
        struct msgbuf_common_hdr msg;
        u8 txhdr[ETH_HLEN];
        u8 flags;
        u8 seg_cnt;
        struct msgbuf_buf_addr metadata_buf_addr;
        struct msgbuf_buf_addr data_buf_addr;
        __le16 metadata_buf_len;
        __le16 data_len;
        __le32 rsvd0;
};

struct msgbuf_rx_bufpost {
        struct msgbuf_common_hdr msg;
        __le16 metadata_buf_len;
        __le16 data_buf_len;
        __le32 rsvd0;
        struct msgbuf_buf_addr metadata_buf_addr;
        struct msgbuf_buf_addr data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
        struct msgbuf_common_hdr msg;
        __le16 host_buf_len;
        __le16 rsvd0[3];
        struct msgbuf_buf_addr host_buf_addr;
        __le32 rsvd1[4];
};

struct msgbuf_completion_hdr {
        __le16 status;
        __le16 flow_ring_id;
};


struct msgbuf_gen_status {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le16 write_idx;
        __le32 rsvd0[3];
};


struct msgbuf_ring_status {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le16 write_idx;
        __le16 rsvd0[5];
};

struct msgbuf_rx_event {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le16 event_data_len;
        __le16 seqnum;
        __le16 rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le16 resp_len;
        __le16 trans_id;
        __le32 cmd;
        __le32 rsvd0;
};

struct msgbuf_tx_status {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le16 metadata_len;
        __le16 tx_status;
};

struct msgbuf_rx_complete {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le16 metadata_len;
        __le16 data_len;
        __le16 data_offset;
        __le16 flags;
        __le32 rx_status_0;
        __le32 rx_status_1;
        __le32 rsvd0;
};

struct msgbuf_tx_flowring_create_req {
        struct msgbuf_common_hdr msg;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
        u8 tid;
        u8 if_flags;
        __le16 flow_ring_id;
        u8 tc;
        u8 priority;
        __le16 int_vector;
        __le16 max_items;
        __le16 len_item;
        struct msgbuf_buf_addr flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
        struct msgbuf_common_hdr msg;
        __le16 flow_ring_id;
        __le16 reason;
        __le32 rsvd0[7];
};

struct msgbuf_flowring_create_resp {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le32 rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le32 rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
        struct msgbuf_common_hdr msg;
        struct msgbuf_completion_hdr compl_hdr;
        __le32 rsvd0[3];
};

struct brcmf_msgbuf_work_item {
        struct list_head queue;
        u32 flowid;
        int ifidx;
        u8 sa[ETH_ALEN];
        u8 da[ETH_ALEN];
};

struct brcmf_msgbuf {
        struct brcmf_pub *drvr;

        struct brcmf_commonring **commonrings;
        struct brcmf_commonring **flowrings;
        dma_addr_t *flowring_dma_handle;

        u16 max_flowrings;
        u16 max_submissionrings;
        u16 max_completionrings;

        u16 rx_dataoffset;
        u32 max_rxbufpost;
        u16 rx_metadata_offset;
        u32 rxbufpost;

        u32 max_ioctlrespbuf;
        u32 cur_ioctlrespbuf;
        u32 max_eventbuf;
        u32 cur_eventbuf;

        void *ioctbuf;
        dma_addr_t ioctbuf_handle;
        u32 ioctbuf_phys_hi;
        u32 ioctbuf_phys_lo;
        int ioctl_resp_status;
        u32 ioctl_resp_ret_len;
        u32 ioctl_resp_pktid;

        u16 data_seq_no;
        u16 ioctl_seq_no;
        u32 reqid;
        wait_queue_head_t ioctl_resp_wait;
        bool ctl_completed;

        struct brcmf_msgbuf_pktids *tx_pktids;
        struct brcmf_msgbuf_pktids *rx_pktids;
        struct brcmf_flowring *flow;

        struct workqueue_struct *txflow_wq;
        struct work_struct txflow_work;
        unsigned long *flow_map;
        unsigned long *txstatus_done_map;

        struct work_struct flowring_work;
        spinlock_t flowring_work_lock;
        struct list_head work_queue;
};

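/*
 * Every skb handed to the firmware is DMA-mapped and parked in a fixed-size
 * pktid array (one for TX, one for RX). The array index travels in the
 * message request_id (offset by one on the TX path) and is used on
 * completion to recover the skb and undo the DMA mapping.
 */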
struct brcmf_msgbuf_pktid {
        atomic_t allocated;
        u16 data_offset;
        struct sk_buff *skb;
        dma_addr_t physaddr;
};

struct brcmf_msgbuf_pktids {
        u32 array_size;
        u32 last_allocated_idx;
        enum dma_data_direction direction;
        struct brcmf_msgbuf_pktid *array;
};

static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);


static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
                         enum dma_data_direction direction)
{
        struct brcmf_msgbuf_pktid *array;
        struct brcmf_msgbuf_pktids *pktids;

        array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
        if (!array)
                return NULL;

        pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
        if (!pktids) {
                kfree(array);
                return NULL;
        }
        pktids->array = array;
        pktids->array_size = nr_array_entries;
        pktids->direction = direction;

        return pktids;
}

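/*
 * Claim a packet id for an skb and DMA-map it. Free slots are found by a
 * round-robin scan starting just past the last allocated index, and
 * atomic_cmpxchg() claims a slot without a lock, which keeps allocation
 * safe against completions releasing ids concurrently.
 */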

static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
                         struct brcmf_msgbuf_pktids *pktids,
                         struct sk_buff *skb, u16 data_offset,
                         dma_addr_t *physaddr, u32 *idx)
{
        struct brcmf_msgbuf_pktid *array;
        u32 count;

        array = pktids->array;

        *physaddr = dma_map_single(dev, skb->data + data_offset,
                                   skb->len - data_offset, pktids->direction);

        if (dma_mapping_error(dev, *physaddr)) {
                brcmf_err("dma_map_single failed !!\n");
                return -ENOMEM;
        }

        *idx = pktids->last_allocated_idx;

        count = 0;
        do {
                (*idx)++;
                if (*idx == pktids->array_size)
                        *idx = 0;
                if (array[*idx].allocated.counter == 0)
                        if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
                                break;
                count++;
        } while (count < pktids->array_size);

        if (count == pktids->array_size)
                return -ENOMEM;

        array[*idx].data_offset = data_offset;
        array[*idx].physaddr = *physaddr;
        array[*idx].skb = skb;

        pktids->last_allocated_idx = *idx;

        return 0;
}


static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
                       u32 idx)
{
        struct brcmf_msgbuf_pktid *pktid;
        struct sk_buff *skb;

        if (idx >= pktids->array_size) {
                brcmf_err("Invalid packet id %d (max %d)\n", idx,
                          pktids->array_size);
                return NULL;
        }
        if (pktids->array[idx].allocated.counter) {
                pktid = &pktids->array[idx];
                dma_unmap_single(dev, pktid->physaddr,
                                 pktid->skb->len - pktid->data_offset,
                                 pktids->direction);
                skb = pktid->skb;
                pktid->allocated.counter = 0;
                return skb;
        } else {
                brcmf_err("Invalid packet id %d (not in use)\n", idx);
        }

        return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
                           struct brcmf_msgbuf_pktids *pktids)
{
        struct brcmf_msgbuf_pktid *array;
        struct brcmf_msgbuf_pktid *pktid;
        u32 count;

        array = pktids->array;
        count = 0;
        do {
                if (array[count].allocated.counter) {
                        pktid = &array[count];
                        dma_unmap_single(dev, pktid->physaddr,
                                         pktid->skb->len - pktid->data_offset,
                                         pktids->direction);
                        brcmu_pkt_buf_free_skb(pktid->skb);
                }
                count++;
        } while (count < pktids->array_size);

        kfree(array);
        kfree(pktids);
}


static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
        if (msgbuf->rx_pktids)
                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
                                           msgbuf->rx_pktids);
        if (msgbuf->tx_pktids)
                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
                                           msgbuf->tx_pktids);
}

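/*
 * Submit an ioctl request to the firmware: reserve a slot on the control
 * submit ring, copy up to BRCMF_TX_IOCTL_MAX_MSG_SIZE bytes of the caller's
 * buffer into the preallocated DMA-coherent ioctl buffer, and publish the
 * updated write pointer to the device.
 */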

static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
                                 uint cmd, void *buf, uint len)
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
        struct brcmf_commonring *commonring;
        struct msgbuf_ioctl_req_hdr *request;
        u16 buf_len;
        void *ret_ptr;
        int err;

        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
        brcmf_commonring_lock(commonring);
        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
        if (!ret_ptr) {
                bphy_err(drvr, "Failed to reserve space in commonring\n");
                brcmf_commonring_unlock(commonring);
                return -ENOMEM;
        }

        msgbuf->reqid++;

        request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
        request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
        request->msg.ifidx = (u8)ifidx;
        request->msg.flags = 0;
        request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
        request->cmd = cpu_to_le32(cmd);
        request->output_buf_len = cpu_to_le16(len);
        request->trans_id = cpu_to_le16(msgbuf->reqid);

        buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
        request->input_buf_len = cpu_to_le16(buf_len);
        request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
        request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
        if (buf)
                memcpy(msgbuf->ioctbuf, buf, buf_len);
        else
                memset(msgbuf->ioctbuf, 0, buf_len);

        err = brcmf_commonring_write_complete(commonring);
        brcmf_commonring_unlock(commonring);

        return err;
}


static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
        return wait_event_timeout(msgbuf->ioctl_resp_wait,
                                  msgbuf->ctl_completed,
                                  MSGBUF_IOCTL_RESP_TIMEOUT);
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
        msgbuf->ctl_completed = true;
        wake_up(&msgbuf->ioctl_resp_wait);
}

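/*
 * Send an ioctl and sleep (up to MSGBUF_IOCTL_RESP_TIMEOUT) until the
 * completion arrives via brcmf_msgbuf_process_ioctl_complete(). Any
 * response payload is then copied out of the posted RX buffer identified
 * by the completion's packet id.
 */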

static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
                                   uint cmd, void *buf, uint len, int *fwerr)
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
        struct sk_buff *skb = NULL;
        int timeout;
        int err;

        brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
        *fwerr = 0;
        msgbuf->ctl_completed = false;
        err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
        if (err)
                return err;

        timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
        if (!timeout) {
                bphy_err(drvr, "Timeout on response for query command\n");
                return -EIO;
        }

        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->rx_pktids,
                                     msgbuf->ioctl_resp_pktid);
        if (msgbuf->ioctl_resp_ret_len != 0) {
                if (!skb)
                        return -EBADF;

                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
                                       len : msgbuf->ioctl_resp_ret_len);
        }
        brcmu_pkt_buf_free_skb(skb);

        *fwerr = msgbuf->ioctl_resp_status;
        return 0;
}


static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
                                 uint cmd, void *buf, uint len, int *fwerr)
{
        return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}


static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
                                struct sk_buff *skb, struct brcmf_if **ifp)
{
        return -ENODEV;
}

static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}

static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
        u32 dma_sz;
        void *dma_buf;

        brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
        dma_buf = msgbuf->flowrings[flowid]->buf_addr;
        dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
                          msgbuf->flowring_dma_handle[flowid]);

        brcmf_flowring_delete(msgbuf->flow, flowid);
}

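/*
 * Flowring creation is requested from the TX path in atomic context, so
 * the heavy lifting - allocating DMA-coherent ring memory with GFP_KERNEL
 * and submitting the create request - is deferred to a work item, dequeued
 * and processed by the flowring worker below.
 */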

static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
        struct brcmf_msgbuf_work_item *work = NULL;
        ulong flags;

        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
        if (!list_empty(&msgbuf->work_queue)) {
                work = list_first_entry(&msgbuf->work_queue,
                                        struct brcmf_msgbuf_work_item, queue);
                list_del(&work->queue);
        }
        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

        return work;
}


static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
                                    struct brcmf_msgbuf_work_item *work)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct msgbuf_tx_flowring_create_req *create;
        struct brcmf_commonring *commonring;
        void *ret_ptr;
        u32 flowid;
        void *dma_buf;
        u32 dma_sz;
        u64 address;
        int err;

        flowid = work->flowid;
        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
        dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
                                     &msgbuf->flowring_dma_handle[flowid],
                                     GFP_KERNEL);
        if (!dma_buf) {
                bphy_err(drvr, "dma_alloc_coherent failed\n");
                brcmf_flowring_delete(msgbuf->flow, flowid);
                return BRCMF_FLOWRING_INVALID_ID;
        }

        brcmf_commonring_config(msgbuf->flowrings[flowid],
                                BRCMF_H2D_TXFLOWRING_MAX_ITEM,
                                BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
        brcmf_commonring_lock(commonring);
        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
        if (!ret_ptr) {
                bphy_err(drvr, "Failed to reserve space in commonring\n");
                brcmf_commonring_unlock(commonring);
                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
                return BRCMF_FLOWRING_INVALID_ID;
        }

        create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
        create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
        create->msg.ifidx = work->ifidx;
        create->msg.request_id = 0;
        create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
        create->flow_ring_id = cpu_to_le16(flowid +
                                           BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
        memcpy(create->sa, work->sa, ETH_ALEN);
        memcpy(create->da, work->da, ETH_ALEN);
        address = (u64)msgbuf->flowring_dma_handle[flowid];
        create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
        create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
        create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
        create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

        brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
                  flowid, work->da, create->tid, work->ifidx);

        err = brcmf_commonring_write_complete(commonring);
        brcmf_commonring_unlock(commonring);
        if (err) {
                bphy_err(drvr, "Failed to write commonring\n");
                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
                return BRCMF_FLOWRING_INVALID_ID;
        }

        return flowid;
}


static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
        struct brcmf_msgbuf *msgbuf;
        struct brcmf_msgbuf_work_item *create;

        msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

        while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
                brcmf_msgbuf_flowring_create_worker(msgbuf, create);
                kfree(create);
        }
}


static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
                                        struct sk_buff *skb)
{
        struct brcmf_msgbuf_work_item *create;
        struct ethhdr *eh = (struct ethhdr *)(skb->data);
        u32 flowid;
        ulong flags;

        create = kzalloc(sizeof(*create), GFP_ATOMIC);
        if (create == NULL)
                return BRCMF_FLOWRING_INVALID_ID;

        flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
                                       skb->priority, ifidx);
        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
                kfree(create);
                return flowid;
        }

        create->flowid = flowid;
        create->ifidx = ifidx;
        memcpy(create->sa, eh->h_source, ETH_ALEN);
        memcpy(create->da, eh->h_dest, ETH_ALEN);

        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
        list_add_tail(&create->queue, &msgbuf->work_queue);
        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
        schedule_work(&msgbuf->flowring_work);

        return flowid;
}

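/*
 * Drain the queued skbs of one flowring into its hardware ring. The write
 * pointer is published in batches: 'count' is pre-biased so the first
 * update happens after BRCMF_MSGBUF_TX_FLUSH_CNT1 packets and subsequent
 * ones every BRCMF_MSGBUF_TX_FLUSH_CNT2, limiting the number of write
 * pointer updates while the ring fills.
 */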

static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
        struct brcmf_flowring *flow = msgbuf->flow;
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct brcmf_commonring *commonring;
        void *ret_ptr;
        u32 count;
        struct sk_buff *skb;
        dma_addr_t physaddr;
        u32 pktid;
        struct msgbuf_tx_msghdr *tx_msghdr;
        u64 address;

        commonring = msgbuf->flowrings[flowid];
        if (!brcmf_commonring_write_available(commonring))
                return;

        brcmf_commonring_lock(commonring);

        count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
        while (brcmf_flowring_qlen(flow, flowid)) {
                skb = brcmf_flowring_dequeue(flow, flowid);
                if (skb == NULL) {
                        bphy_err(drvr, "No SKB, but qlen %d\n",
                                 brcmf_flowring_qlen(flow, flowid));
                        break;
                }
                skb_orphan(skb);
                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
                                             msgbuf->tx_pktids, skb, ETH_HLEN,
                                             &physaddr, &pktid)) {
                        brcmf_flowring_reinsert(flow, flowid, skb);
                        bphy_err(drvr, "No PKTID available !!\n");
                        break;
                }
                ret_ptr = brcmf_commonring_reserve_for_write(commonring);
                if (!ret_ptr) {
                        brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                               msgbuf->tx_pktids, pktid);
                        brcmf_flowring_reinsert(flow, flowid, skb);
                        break;
                }
                count++;

                tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

                tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
                tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
                tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
                tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
                tx_msghdr->flags |= (skb->priority & 0x07) <<
                                    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
                tx_msghdr->seg_cnt = 1;
                memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
                tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
                address = (u64)physaddr;
                tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
                tx_msghdr->data_buf_addr.low_addr =
                        cpu_to_le32(address & 0xffffffff);
                tx_msghdr->metadata_buf_len = 0;
                tx_msghdr->metadata_buf_addr.high_addr = 0;
                tx_msghdr->metadata_buf_addr.low_addr = 0;
                atomic_inc(&commonring->outstanding_tx);
                if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
                        brcmf_commonring_write_complete(commonring);
                        count = 0;
                }
        }
        if (count)
                brcmf_commonring_write_complete(commonring);
        brcmf_commonring_unlock(commonring);
}


static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
        struct brcmf_msgbuf *msgbuf;
        u32 flowid;

        msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
        for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
                clear_bit(flowid, msgbuf->flow_map);
                brcmf_msgbuf_txflow(msgbuf, flowid);
        }
}

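/*
 * Flag a flowring as having pending TX and kick the txflow worker. The
 * kick is skipped (unless forced) while many transmissions are still
 * outstanding; brcmf_proto_msgbuf_rx_trigger() reschedules the worker as
 * TX status completions drain the ring.
 */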

static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
                                        bool force)
{
        struct brcmf_commonring *commonring;

        set_bit(flowid, msgbuf->flow_map);
        commonring = msgbuf->flowrings[flowid];
        if ((force) || (atomic_read(&commonring->outstanding_tx) <
                        BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
                queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

        return 0;
}


static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
                                      struct sk_buff *skb)
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
        struct brcmf_flowring *flow = msgbuf->flow;
        struct ethhdr *eh = (struct ethhdr *)(skb->data);
        u32 flowid;
        u32 queue_count;
        bool force;

        flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
                flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
                if (flowid == BRCMF_FLOWRING_INVALID_ID)
                        return -ENOMEM;
        }
        queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
        force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);

        return 0;
}


static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
                                 enum proto_addr_mode addr_mode)
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

        brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

        brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

        brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
        struct msgbuf_ioctl_resp_hdr *ioctl_resp;

        ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

        msgbuf->ioctl_resp_status =
                (s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
        msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
        msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

        brcmf_msgbuf_ioctl_resp_wake(msgbuf);

        if (msgbuf->cur_ioctlrespbuf)
                msgbuf->cur_ioctlrespbuf--;
        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}


static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
        struct brcmf_commonring *commonring;
        struct msgbuf_tx_status *tx_status;
        u32 idx;
        struct sk_buff *skb;
        u16 flowid;

        tx_status = (struct msgbuf_tx_status *)buf;
        idx = le32_to_cpu(tx_status->msg.request_id) - 1;
        flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
        flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->tx_pktids, idx);
        if (!skb)
                return;

        set_bit(flowid, msgbuf->txstatus_done_map);
        commonring = msgbuf->flowrings[flowid];
        atomic_dec(&commonring->outstanding_tx);

        brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
                         skb, true);
}

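/*
 * Pre-post empty receive buffers to the firmware. Each posted skb is
 * pktid-mapped and described by a MSGBUF_TYPE_RXBUF_POST message; the
 * firmware returns the packet id in a later MSGBUF_TYPE_RX_CMPLT message.
 * An optional metadata region can be split off the head of each buffer.
 */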

static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct brcmf_commonring *commonring;
        void *ret_ptr;
        struct sk_buff *skb;
        u16 alloced;
        u32 pktlen;
        dma_addr_t physaddr;
        struct msgbuf_rx_bufpost *rx_bufpost;
        u64 address;
        u32 pktid;
        u32 i;

        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
                                                              count,
                                                              &alloced);
        if (!ret_ptr) {
                brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
                return 0;
        }

        for (i = 0; i < alloced; i++) {
                rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
                memset(rx_bufpost, 0, sizeof(*rx_bufpost));

                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

                if (skb == NULL) {
                        bphy_err(drvr, "Failed to alloc SKB\n");
                        brcmf_commonring_write_cancel(commonring, alloced - i);
                        break;
                }

                pktlen = skb->len;
                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
                                             msgbuf->rx_pktids, skb, 0,
                                             &physaddr, &pktid)) {
                        dev_kfree_skb_any(skb);
                        bphy_err(drvr, "No PKTID available !!\n");
                        brcmf_commonring_write_cancel(commonring, alloced - i);
                        break;
                }

                if (msgbuf->rx_metadata_offset) {
                        address = (u64)physaddr;
                        rx_bufpost->metadata_buf_len =
                                cpu_to_le16(msgbuf->rx_metadata_offset);
                        rx_bufpost->metadata_buf_addr.high_addr =
                                cpu_to_le32(address >> 32);
                        rx_bufpost->metadata_buf_addr.low_addr =
                                cpu_to_le32(address & 0xffffffff);

                        skb_pull(skb, msgbuf->rx_metadata_offset);
                        pktlen = skb->len;
                        physaddr += msgbuf->rx_metadata_offset;
                }
                rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
                rx_bufpost->msg.request_id = cpu_to_le32(pktid);

                address = (u64)physaddr;
                rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
                rx_bufpost->data_buf_addr.high_addr =
                        cpu_to_le32(address >> 32);
                rx_bufpost->data_buf_addr.low_addr =
                        cpu_to_le32(address & 0xffffffff);

                ret_ptr += brcmf_commonring_len_item(commonring);
        }

        if (i)
                brcmf_commonring_write_complete(commonring);

        return i;
}


static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
        u32 fillbufs;
        u32 retcount;

        fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

        while (fillbufs) {
                retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
                if (!retcount)
                        break;
                msgbuf->rxbufpost += retcount;
                fillbufs -= retcount;
        }
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
        msgbuf->rxbufpost -= rxcnt;
        if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
                                  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}

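/*
 * Post receive buffers for ioctl responses and firmware events on the
 * control submit ring. The completion handlers replenish these one-for-one
 * so that up to max_ioctlrespbuf/max_eventbuf buffers stay outstanding.
 */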

static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
                             u32 count)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct brcmf_commonring *commonring;
        void *ret_ptr;
        struct sk_buff *skb;
        u16 alloced;
        u32 pktlen;
        dma_addr_t physaddr;
        struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
        u64 address;
        u32 pktid;
        u32 i;

        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
        brcmf_commonring_lock(commonring);
        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
                                                              count,
                                                              &alloced);
        if (!ret_ptr) {
                bphy_err(drvr, "Failed to reserve space in commonring\n");
                brcmf_commonring_unlock(commonring);
                return 0;
        }

        for (i = 0; i < alloced; i++) {
                rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
                memset(rx_bufpost, 0, sizeof(*rx_bufpost));

                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);

                if (skb == NULL) {
                        bphy_err(drvr, "Failed to alloc SKB\n");
                        brcmf_commonring_write_cancel(commonring, alloced - i);
                        break;
                }

                pktlen = skb->len;
                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
                                             msgbuf->rx_pktids, skb, 0,
                                             &physaddr, &pktid)) {
                        dev_kfree_skb_any(skb);
                        bphy_err(drvr, "No PKTID available !!\n");
                        brcmf_commonring_write_cancel(commonring, alloced - i);
                        break;
                }
                if (event_buf)
                        rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
                else
                        rx_bufpost->msg.msgtype =
                                MSGBUF_TYPE_IOCTLRESP_BUF_POST;
                rx_bufpost->msg.request_id = cpu_to_le32(pktid);

                address = (u64)physaddr;
                rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
                rx_bufpost->host_buf_addr.high_addr =
                        cpu_to_le32(address >> 32);
                rx_bufpost->host_buf_addr.low_addr =
                        cpu_to_le32(address & 0xffffffff);

                ret_ptr += brcmf_commonring_len_item(commonring);
        }

        if (i)
                brcmf_commonring_write_complete(commonring);

        brcmf_commonring_unlock(commonring);

        return i;
}


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
        u32 count;

        count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
        msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
        u32 count;

        count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
        msgbuf->cur_eventbuf += count;
}


static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct msgbuf_rx_event *event;
        u32 idx;
        u16 buflen;
        struct sk_buff *skb;
        struct brcmf_if *ifp;

        event = (struct msgbuf_rx_event *)buf;
        idx = le32_to_cpu(event->msg.request_id);
        buflen = le16_to_cpu(event->event_data_len);

        if (msgbuf->cur_eventbuf)
                msgbuf->cur_eventbuf--;
        brcmf_msgbuf_rxbuf_event_post(msgbuf);

        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->rx_pktids, idx);
        if (!skb)
                return;

        if (msgbuf->rx_dataoffset)
                skb_pull(skb, msgbuf->rx_dataoffset);

        skb_trim(skb, buflen);

        ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
        if (!ifp || !ifp->ndev) {
                bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
                         event->msg.ifidx);
                goto exit;
        }

        skb->protocol = eth_type_trans(skb, ifp->ndev);

        brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);

exit:
        brcmu_pkt_buf_free_skb(skb);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct msgbuf_rx_complete *rx_complete;
        struct sk_buff *skb;
        u16 data_offset;
        u16 buflen;
        u16 flags;
        u32 idx;
        struct brcmf_if *ifp;

        brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

        rx_complete = (struct msgbuf_rx_complete *)buf;
        data_offset = le16_to_cpu(rx_complete->data_offset);
        buflen = le16_to_cpu(rx_complete->data_len);
        idx = le32_to_cpu(rx_complete->msg.request_id);
        flags = le16_to_cpu(rx_complete->flags);

        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->rx_pktids, idx);
        if (!skb)
                return;

        if (data_offset)
                skb_pull(skb, data_offset);
        else if (msgbuf->rx_dataoffset)
                skb_pull(skb, msgbuf->rx_dataoffset);

        skb_trim(skb, buflen);

        if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
            BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
                ifp = msgbuf->drvr->mon_if;

                if (!ifp) {
                        bphy_err(drvr, "Received unexpected monitor pkt\n");
                        brcmu_pkt_buf_free_skb(skb);
                        return;
                }

                brcmf_netif_mon_rx(ifp, skb);
                return;
        }

        ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
        if (!ifp || !ifp->ndev) {
                bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
                         rx_complete->msg.ifidx);
                brcmu_pkt_buf_free_skb(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, ifp->ndev);
        brcmf_netif_rx(ifp, skb);
}

static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
                                            void *buf)
{
        struct msgbuf_gen_status *gen_status = buf;
        struct brcmf_pub *drvr = msgbuf->drvr;
        int err;

        err = le16_to_cpu(gen_status->compl_hdr.status);
        if (err)
                bphy_err(drvr, "Firmware reported general error: %d\n", err);
}

static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
                                             void *buf)
{
        struct msgbuf_ring_status *ring_status = buf;
        struct brcmf_pub *drvr = msgbuf->drvr;
        int err;

        err = le16_to_cpu(ring_status->compl_hdr.status);
        if (err) {
                int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);

                bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
                         err);
        }
}

static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
                                               void *buf)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct msgbuf_flowring_create_resp *flowring_create_resp;
        u16 status;
        u16 flowid;

        flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

        flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
        flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
        status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

        if (status) {
                bphy_err(drvr, "Flowring creation failed, code %d\n", status);
                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
                return;
        }
        brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
                  status);

        brcmf_flowring_open(msgbuf->flow, flowid);

        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}


static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
                                               void *buf)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct msgbuf_flowring_delete_resp *flowring_delete_resp;
        u16 status;
        u16 flowid;

        flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

        flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
        flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
        status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

        if (status) {
                bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
                brcmf_flowring_delete(msgbuf->flow, flowid);
                return;
        }
        brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
                  status);

        brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}

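/*
 * Dispatch one completion ring message to its handler based on the msgtype
 * field of the common header.
 */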

static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
        struct brcmf_pub *drvr = msgbuf->drvr;
        struct msgbuf_common_hdr *msg;

        msg = (struct msgbuf_common_hdr *)buf;
        switch (msg->msgtype) {
        case MSGBUF_TYPE_GEN_STATUS:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
                brcmf_msgbuf_process_gen_status(msgbuf, buf);
                break;
        case MSGBUF_TYPE_RING_STATUS:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
                brcmf_msgbuf_process_ring_status(msgbuf, buf);
                break;
        case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
                brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
                break;
        case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
                brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
                break;
        case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
                break;
        case MSGBUF_TYPE_IOCTL_CMPLT:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
                brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
                break;
        case MSGBUF_TYPE_WL_EVENT:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
                brcmf_msgbuf_process_event(msgbuf, buf);
                break;
        case MSGBUF_TYPE_TX_STATUS:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
                brcmf_msgbuf_process_txstatus(msgbuf, buf);
                break;
        case MSGBUF_TYPE_RX_CMPLT:
                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
                brcmf_msgbuf_process_rx_complete(msgbuf, buf);
                break;
        default:
                bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
                break;
        }
}


static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
                                    struct brcmf_commonring *commonring)
{
        void *buf;
        u16 count;
        u16 processed;

again:
        buf = brcmf_commonring_get_read_ptr(commonring, &count);
        if (buf == NULL)
                return;

        processed = 0;
        while (count) {
                brcmf_msgbuf_process_msgtype(msgbuf,
                                             buf + msgbuf->rx_dataoffset);
                buf += brcmf_commonring_len_item(commonring);
                processed++;
                if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
                        brcmf_commonring_read_complete(commonring, processed);
                        processed = 0;
                }
                count--;
        }
        if (processed)
                brcmf_commonring_read_complete(commonring, processed);

        if (commonring->r_ptr == 0)
                goto again;
}

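/*
 * Entry point for the bus layer (e.g. the PCIe interrupt path) to process
 * all device-to-host completion rings and re-kick flowrings that still
 * have queued traffic.
 */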

int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
        struct brcmf_commonring *commonring;
        void *buf;
        u32 flowid;
        int qlen;

        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
        brcmf_msgbuf_process_rx(msgbuf, buf);
        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
        brcmf_msgbuf_process_rx(msgbuf, buf);
        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
        brcmf_msgbuf_process_rx(msgbuf, buf);

        for_each_set_bit(flowid, msgbuf->txstatus_done_map,
                         msgbuf->max_flowrings) {
                clear_bit(flowid, msgbuf->txstatus_done_map);
                commonring = msgbuf->flowrings[flowid];
                qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
                if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
                    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
                                BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
                        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
        }

        return 0;
}


void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
        struct msgbuf_tx_flowring_delete_req *delete;
        struct brcmf_commonring *commonring;
        void *ret_ptr;
        u8 ifidx;
        int err;

        if (drvr->bus_if->state != BRCMF_BUS_UP) {
                brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
                return;
        }

        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
        brcmf_commonring_lock(commonring);
        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
        if (!ret_ptr) {
                bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
                brcmf_commonring_unlock(commonring);
                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
                return;
        }

        delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

        ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

        delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
        delete->msg.ifidx = ifidx;
        delete->msg.request_id = 0;

        delete->flow_ring_id = cpu_to_le16(flowid +
                                           BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
        delete->reason = 0;

        brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
                  flowid, ifidx);

        err = brcmf_commonring_write_complete(commonring);
        brcmf_commonring_unlock(commonring);
        if (err) {
                bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
        }
}

#ifdef DEBUG
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
        struct brcmf_commonring *commonring;
        u16 i;
        struct brcmf_flowring_ring *ring;
        struct brcmf_flowring_hash *hash;

        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
        seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
        seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n",
                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
        seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n",
                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
        seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n",
                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
        seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n",
                   commonring->r_ptr, commonring->w_ptr, commonring->depth);

        seq_printf(seq, "\nh2d_flowrings: depth %u\n",
                   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
        seq_puts(seq, "Active flowrings:\n");
        for (i = 0; i < msgbuf->flow->nrofrings; i++) {
                if (!msgbuf->flow->rings[i])
                        continue;
                ring = msgbuf->flow->rings[i];
                if (ring->status != RING_OPEN)
                        continue;
                commonring = msgbuf->flowrings[i];
                hash = &msgbuf->flow->hash[ring->hash_id];
                seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
                                " ifidx %u, fifo %u, da %pM\n",
                           i, commonring->r_ptr, commonring->w_ptr,
                           skb_queue_len(&ring->skblist), ring->blocked,
                           hash->ifidx, hash->fifo, hash->mac);
        }

        return 0;
}
#else
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
        return 0;
}
#endif

static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
{
        brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
}

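/*
 * Attach the msgbuf protocol layer: bind the protocol ops, allocate the
 * shared ioctl buffer and the pktid tables, attach flowring state and
 * pre-post the initial RX data, event and ioctl response buffers.
 */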
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
        struct brcmf_bus_msgbuf *if_msgbuf;
        struct brcmf_msgbuf *msgbuf;
        u64 address;
        u32 count;

        if_msgbuf = drvr->bus_if->msgbuf;

        if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
                bphy_err(drvr, "driver not configured for this many flowrings %d\n",
                         if_msgbuf->max_flowrings);
                if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
        }

        msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
        if (!msgbuf)
                goto fail;

        msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
        if (msgbuf->txflow_wq == NULL) {
                bphy_err(drvr, "workqueue creation failed\n");
                goto fail;
        }
        INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
        count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
        count = count * sizeof(unsigned long);
        msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
        if (!msgbuf->flow_map)
                goto fail;

        msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
        if (!msgbuf->txstatus_done_map)
                goto fail;

        msgbuf->drvr = drvr;
        msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
                                             BRCMF_TX_IOCTL_MAX_MSG_SIZE,
                                             &msgbuf->ioctbuf_handle,
                                             GFP_KERNEL);
        if (!msgbuf->ioctbuf)
                goto fail;
        address = (u64)msgbuf->ioctbuf_handle;
        msgbuf->ioctbuf_phys_hi = address >> 32;
        msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

        drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
        drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
        drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
        drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
        drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
        drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
        drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
        drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
        drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
        drvr->proto->pd = msgbuf;

        init_waitqueue_head(&msgbuf->ioctl_resp_wait);

        msgbuf->commonrings =
                (struct brcmf_commonring **)if_msgbuf->commonrings;
        msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
        msgbuf->max_flowrings = if_msgbuf->max_flowrings;
        msgbuf->flowring_dma_handle =
                kcalloc(msgbuf->max_flowrings,
                        sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
        if (!msgbuf->flowring_dma_handle)
                goto fail;

        msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
        msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

        msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
        msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

        msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
                                                     DMA_TO_DEVICE);
        if (!msgbuf->tx_pktids)
                goto fail;
        msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
                                                     DMA_FROM_DEVICE);
        if (!msgbuf->rx_pktids)
                goto fail;

        msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
                                             if_msgbuf->max_flowrings);
        if (!msgbuf->flow)
                goto fail;

        brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
                  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
                  msgbuf->max_ioctlrespbuf);
        count = 0;
        do {
                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
                if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
                        msleep(10);
                else
                        break;
                count++;
        } while (count < 10);
        brcmf_msgbuf_rxbuf_event_post(msgbuf);
        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

        INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
        spin_lock_init(&msgbuf->flowring_work_lock);
        INIT_LIST_HEAD(&msgbuf->work_queue);

        return 0;

fail:
        if (msgbuf) {
                kfree(msgbuf->flow_map);
                kfree(msgbuf->txstatus_done_map);
                brcmf_msgbuf_release_pktids(msgbuf);
                kfree(msgbuf->flowring_dma_handle);
                if (msgbuf->ioctbuf)
                        dma_free_coherent(drvr->bus_if->dev,
                                          BRCMF_TX_IOCTL_MAX_MSG_SIZE,
                                          msgbuf->ioctbuf,
                                          msgbuf->ioctbuf_handle);
                if (msgbuf->txflow_wq)
                        destroy_workqueue(msgbuf->txflow_wq);
                kfree(msgbuf);
        }
        return -ENOMEM;
}


void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
        struct brcmf_msgbuf *msgbuf;
        struct brcmf_msgbuf_work_item *work;

        brcmf_dbg(TRACE, "Enter\n");
        if (drvr->proto->pd) {
                msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
                cancel_work_sync(&msgbuf->flowring_work);
                while (!list_empty(&msgbuf->work_queue)) {
                        work = list_first_entry(&msgbuf->work_queue,
                                                struct brcmf_msgbuf_work_item,
                                                queue);
                        list_del(&work->queue);
                        kfree(work);
                }
                kfree(msgbuf->flow_map);
                kfree(msgbuf->txstatus_done_map);
                if (msgbuf->txflow_wq)
                        destroy_workqueue(msgbuf->txflow_wq);

                brcmf_flowring_detach(msgbuf->flow);
                dma_free_coherent(drvr->bus_if->dev,
                                  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
                                  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
                brcmf_msgbuf_release_pktids(msgbuf);
                kfree(msgbuf->flowring_dma_handle);
                kfree(msgbuf);
                drvr->proto->pd = NULL;
        }
}