/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"

/*
 * tid_mux packs two fields into one byte:
 * tid - bits tid_mux[0..3]
 * aid - bits tid_mux[4..7]
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

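/*
 * For example, tid_mux 0x25 carries tid = 0x25 & 0xf = 5 in the low
 * nibble and aid = 0x25 >> 4 = 2 in the high nibble.
 */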
static inline u8 ath6kl_get_tid(u8 tid_mux)
{
    return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
    return tid_mux >> ATH6KL_AID_SHIFT;
}

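/*
 * Map an IBSS peer, keyed by destination MAC, to an HTC data endpoint so
 * that per-peer tx accounting can be done for power save. On return,
 * *map_no is the 1-based index into ar->node_map; 0 means unmapped
 * (e.g. multicast, which always goes out on ENDPOINT_2).
 */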
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
                   u32 *map_no)
{
    struct ath6kl *ar = ath6kl_priv(dev);
    struct ethhdr *eth_hdr;
    u32 i, ep_map = -1;
    u8 *datap;

    *map_no = 0;
    datap = skb->data;
    eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

    if (is_multicast_ether_addr(eth_hdr->h_dest))
        return ENDPOINT_2;

    for (i = 0; i < ar->node_num; i++) {
        if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
               ETH_ALEN) == 0) {
            *map_no = i + 1;
            ar->node_map[i].tx_pend++;
            return ar->node_map[i].ep_id;
        }

        if ((ep_map == -1) && !ar->node_map[i].tx_pend)
            ep_map = i;
    }

    if (ep_map == -1) {
        ep_map = ar->node_num;
        ar->node_num++;
        if (ar->node_num > MAX_NODE_NUM)
            return ENDPOINT_UNUSED;
    }

    memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

    for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
        if (!ar->tx_pending[i]) {
            ar->node_map[ep_map].ep_id = i;
            break;
        }

        /*
         * No free endpoint is available; start redistribution on
         * the in-use endpoints.
         */
        if (i == ENDPOINT_5) {
            ar->node_map[ep_map].ep_id = ar->next_ep_id;
            ar->next_ep_id++;
            if (ar->next_ep_id > ENDPOINT_5)
                ar->next_ep_id = ENDPOINT_2;
        }
    }

    *map_no = ep_map + 1;
    ar->node_map[ep_map].tx_pend++;

    return ar->node_map[ep_map].ep_id;
}

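/*
 * Returns true if the skb was queued on the STA's APSD queue, in which
 * case ownership passes to the queue and the caller must not transmit or
 * free it. Returns false if the frame should be sent now; the UAPSD,
 * More and EOSP bits for the WMI data header are reported through *flags.
 */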
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
                struct ath6kl_vif *vif,
                struct sk_buff *skb,
                u32 *flags)
{
    struct ath6kl *ar = vif->ar;
    bool is_apsdq_empty = false;
    struct ethhdr *datap = (struct ethhdr *) skb->data;
    u8 up = 0, traffic_class, *ip_hdr;
    u16 ether_type;
    struct ath6kl_llc_snap_hdr *llc_hdr;

    if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
        /*
         * This tx is because of a uAPSD trigger; determine the
         * More and EOSP bits. Set EOSP if the queue is empty or
         * sufficient frames have been delivered for this trigger.
         */
        spin_lock_bh(&conn->psq_lock);
        if (!skb_queue_empty(&conn->apsdq))
            *flags |= WMI_DATA_HDR_FLAGS_MORE;
        else if (conn->sta_flags & STA_PS_APSD_EOSP)
            *flags |= WMI_DATA_HDR_FLAGS_EOSP;
        *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
        spin_unlock_bh(&conn->psq_lock);
        return false;
    } else if (!conn->apsd_info) {
        return false;
    }

    if (test_bit(WMM_ENABLED, &vif->flags)) {
        ether_type = be16_to_cpu(datap->h_proto);
        if (is_ethertype(ether_type)) {
            /* packet is in DIX format */
            ip_hdr = (u8 *)(datap + 1);
        } else {
            /* packet is in 802.3 format */
            llc_hdr = (struct ath6kl_llc_snap_hdr *)
                            (datap + 1);
            ether_type = be16_to_cpu(llc_hdr->eth_type);
            ip_hdr = (u8 *)(llc_hdr + 1);
        }

        if (ether_type == IP_ETHERTYPE)
            up = ath6kl_wmi_determine_user_priority(
                            ip_hdr, 0);
    }

    traffic_class = ath6kl_wmi_get_traffic_class(up);

    if ((conn->apsd_info & (1 << traffic_class)) == 0)
        return false;

    /* Queue the frames if the STA is sleeping */
    spin_lock_bh(&conn->psq_lock);
    is_apsdq_empty = skb_queue_empty(&conn->apsdq);
    skb_queue_tail(&conn->apsdq, skb);
    spin_unlock_bh(&conn->psq_lock);

    /*
     * If this is the first pkt getting queued
     * for this STA, update the PVB for this STA.
     */
    if (is_apsdq_empty) {
        ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                          vif->fw_vif_idx,
                          conn->aid, 1, 0);
    }
    *flags |= WMI_DATA_HDR_FLAGS_UAPSD;

    return true;
}

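/*
 * Legacy (non-APSD) power-save queueing. As above, returns true once the
 * skb has been placed on conn->psq and ownership has passed to the queue.
 */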
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
                struct ath6kl_vif *vif,
                struct sk_buff *skb,
                u32 *flags)
{
    bool is_psq_empty = false;
    struct ath6kl *ar = vif->ar;

    if (conn->sta_flags & STA_PS_POLLED) {
        spin_lock_bh(&conn->psq_lock);
        if (!skb_queue_empty(&conn->psq))
            *flags |= WMI_DATA_HDR_FLAGS_MORE;
        spin_unlock_bh(&conn->psq_lock);
        return false;
    }

    /* Queue the frames if the STA is sleeping */
    spin_lock_bh(&conn->psq_lock);
    is_psq_empty = skb_queue_empty(&conn->psq);
    skb_queue_tail(&conn->psq, skb);
    spin_unlock_bh(&conn->psq_lock);

    /*
     * If this is the first pkt getting queued
     * for this STA, update the PVB for this
     * STA.
     */
    if (is_psq_empty)
        ath6kl_wmi_set_pvb_cmd(ar->wmi,
                       vif->fw_vif_idx,
                       conn->aid, 1);
    return true;
}

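/*
 * AP-mode power-save gatekeeper for the tx path. Returns true if the skb
 * was consumed here (queued for one or more sleeping STAs, or dropped
 * because the destination STA is unknown), so the caller must not send
 * or free it.
 */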
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
                u32 *flags)
{
    struct ethhdr *datap = (struct ethhdr *) skb->data;
    struct ath6kl_sta *conn = NULL;
    bool ps_queued = false;
    struct ath6kl *ar = vif->ar;

    if (is_multicast_ether_addr(datap->h_dest)) {
        u8 ctr = 0;
        bool q_mcast = false;

        for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
            if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
                q_mcast = true;
                break;
            }
        }

        if (q_mcast) {
            /*
             * If this transmit is not because of a DTIM
             * expiry, queue it.
             */
            if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
                bool is_mcastq_empty = false;

                spin_lock_bh(&ar->mcastpsq_lock);
                is_mcastq_empty =
                    skb_queue_empty(&ar->mcastpsq);
                skb_queue_tail(&ar->mcastpsq, skb);
                spin_unlock_bh(&ar->mcastpsq_lock);

                /*
                 * If this is the first mcast pkt getting
                 * queued, indicate to the target to set the
                 * BitmapControl LSB of the TIM IE.
                 */
                if (is_mcastq_empty)
                    ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                   vif->fw_vif_idx,
                                   MCAST_AID, 1);

                ps_queued = true;
            } else {
                /*
                 * This transmit is because of DTIM expiry.
                 * Determine if the MoreData bit has to be set.
                 */
                spin_lock_bh(&ar->mcastpsq_lock);
                if (!skb_queue_empty(&ar->mcastpsq))
                    *flags |= WMI_DATA_HDR_FLAGS_MORE;
                spin_unlock_bh(&ar->mcastpsq_lock);
            }
        }
    } else {
        conn = ath6kl_find_sta(vif, datap->h_dest);
        if (!conn) {
            dev_kfree_skb(skb);

            /* Inform the caller that the skb is consumed */
            return true;
        }

        if (conn->sta_flags & STA_PS_SLEEP) {
            ps_queued = ath6kl_process_uapsdq(conn,
                        vif, skb, flags);
            if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
                ps_queued = ath6kl_process_psq(conn,
                        vif, skb, flags);
        }
    }
    return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
              enum htc_endpoint_id eid)
{
    struct ath6kl *ar = devt;
    int status = 0;
    struct ath6kl_cookie *cookie = NULL;

    trace_ath6kl_wmi_cmd(skb->data, skb->len);

    if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
        dev_kfree_skb(skb);
        return -EACCES;
    }

    if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
             eid >= ENDPOINT_MAX)) {
        status = -EINVAL;
        goto fail_ctrl_tx;
    }

    spin_lock_bh(&ar->lock);

    ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
           "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
           skb, skb->len, eid);

    if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
        /*
         * Control endpoint is full; don't allocate resources, we
         * are just going to drop this packet.
         */
        cookie = NULL;
        ath6kl_err("wmi ctrl ep full, dropping pkt: 0x%p, len:%d\n",
               skb, skb->len);
    } else {
        cookie = ath6kl_alloc_cookie(ar);
    }

    if (cookie == NULL) {
        spin_unlock_bh(&ar->lock);
        status = -ENOMEM;
        goto fail_ctrl_tx;
    }

    ar->tx_pending[eid]++;

    if (eid != ar->ctrl_ep)
        ar->total_tx_data_pend++;

    spin_unlock_bh(&ar->lock);

    cookie->skb = skb;
    cookie->map_no = 0;
    set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
             eid, ATH6KL_CONTROL_PKT_TAG);
    cookie->htc_pkt.skb = skb;

    /*
     * This interface is asynchronous; if there is an error, cleanup
     * will happen in the TX completion callback.
     */
    ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

    return 0;

fail_ctrl_tx:
    dev_kfree_skb(skb);
    return status;
}

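/*
 * ndo_start_xmit handler. Note that every path returns 0 (NETDEV_TX_OK):
 * on failure the skb is freed here and tx_dropped/tx_aborted_errors are
 * bumped, so the stack never has to requeue the frame.
 */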
netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct ath6kl *ar = ath6kl_priv(dev);
    struct ath6kl_cookie *cookie = NULL;
    enum htc_endpoint_id eid = ENDPOINT_UNUSED;
    struct ath6kl_vif *vif = netdev_priv(dev);
    u32 map_no = 0;
    u16 htc_tag = ATH6KL_DATA_PKT_TAG;
    u8 ac = 99; /* initialize to unmapped ac */
    bool chk_adhoc_ps_mapping = false;
    int ret;
    struct wmi_tx_meta_v2 meta_v2;
    void *meta;
    u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
    u8 meta_ver = 0;
    u32 flags = 0;

    ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
           "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
           skb, skb->data, skb->len);

    /* If target is not associated */
    if (!test_bit(CONNECTED, &vif->flags))
        goto fail_tx;

    if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
        goto fail_tx;

    if (!test_bit(WMI_READY, &ar->flag))
        goto fail_tx;

    /* AP mode power save processing */
    if (vif->nw_type == AP_NETWORK) {
        if (ath6kl_powersave_ap(vif, skb, &flags))
            return 0;
    }

    if (test_bit(WMI_ENABLED, &ar->flag)) {
        if ((dev->features & NETIF_F_IP_CSUM) &&
            (csum == CHECKSUM_PARTIAL)) {
            csum_start = skb->csum_start -
                    (skb_network_header(skb) - skb->head) +
                    sizeof(struct ath6kl_llc_snap_hdr);
            csum_dest = skb->csum_offset + csum_start;
        }

        if (skb_cow_head(skb, dev->needed_headroom)) {
            dev->stats.tx_dropped++;
            kfree_skb(skb);
            return 0;
        }

        if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
            ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
            goto fail_tx;
        }

        if ((dev->features & NETIF_F_IP_CSUM) &&
            (csum == CHECKSUM_PARTIAL)) {
            meta_v2.csum_start = csum_start;
            meta_v2.csum_dest = csum_dest;

            /* instruct target to calculate checksum */
            meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
            meta_ver = WMI_META_VERSION_2;
            meta = &meta_v2;
        } else {
            meta_ver = 0;
            meta = NULL;
        }

        ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
                DATA_MSGTYPE, flags, 0,
                meta_ver,
                meta, vif->fw_vif_idx);

        if (ret) {
            ath6kl_warn("failed to add wmi data header: %d\n",
                    ret);
            goto fail_tx;
        }

        if ((vif->nw_type == ADHOC_NETWORK) &&
            ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
            chk_adhoc_ps_mapping = true;
        else {
            /* get the stream mapping */
            ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
                    vif->fw_vif_idx, skb,
                    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
            if (ret)
                goto fail_tx;
        }
    } else {
        goto fail_tx;
    }

    spin_lock_bh(&ar->lock);

    if (chk_adhoc_ps_mapping)
        eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
    else
        eid = ar->ac2ep_map[ac];

    if (eid == 0 || eid == ENDPOINT_UNUSED) {
        ath6kl_err("eid %d is not mapped!\n", eid);
        spin_unlock_bh(&ar->lock);
        goto fail_tx;
    }

    /* allocate resource for this packet */
    cookie = ath6kl_alloc_cookie(ar);

    if (!cookie) {
        spin_unlock_bh(&ar->lock);
        goto fail_tx;
    }

    /* update counts while the lock is held */
    ar->tx_pending[eid]++;
    ar->total_tx_data_pend++;

    spin_unlock_bh(&ar->lock);

    if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
        skb_cloned(skb)) {
        /*
         * We will have to move the buffer data to align it. Since the
         * skb buffer is cloned and not only the header is changed, we
         * have to copy it to allow the changes. Since we are copying
         * the data here, we may as well align it by reserving suitable
         * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
         */
        struct sk_buff *nskb;

        nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
        if (nskb == NULL)
            goto fail_tx;
        kfree_skb(skb);
        skb = nskb;
    }

    cookie->skb = skb;
    cookie->map_no = map_no;
    set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
             eid, htc_tag);
    cookie->htc_pkt.skb = skb;

    ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
            skb->data, skb->len);

    /*
     * The HTC interface is asynchronous; if this fails, cleanup will
     * happen in the ath6kl_tx_complete callback.
     */
    ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

    return 0;

fail_tx:
    dev_kfree_skb(skb);

    dev->stats.tx_dropped++;
    dev->stats.tx_aborted_errors++;

    return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
    struct ath6kl *ar = devt;
    enum htc_endpoint_id eid;
    int i;

    eid = ar->ac2ep_map[traffic_class];

    if (!test_bit(WMI_ENABLED, &ar->flag))
        goto notify_htc;

    spin_lock_bh(&ar->lock);

    ar->ac_stream_active[traffic_class] = active;

    if (active) {
        /*
         * Keep track of the active stream with the highest
         * priority.
         */
        if (ar->ac_stream_pri_map[traffic_class] >
            ar->hiac_stream_active_pri)
            /* set the new highest active priority */
            ar->hiac_stream_active_pri =
                    ar->ac_stream_pri_map[traffic_class];

    } else {
        /*
         * We may have to search for the next active stream
         * that is the highest priority.
         */
        if (ar->hiac_stream_active_pri ==
            ar->ac_stream_pri_map[traffic_class]) {
            /*
             * The highest priority stream just went inactive;
             * reset and search for the next highest active
             * priority stream.
             */
            ar->hiac_stream_active_pri = 0;

            for (i = 0; i < WMM_NUM_AC; i++) {
                if (ar->ac_stream_active[i] &&
                    (ar->ac_stream_pri_map[i] >
                     ar->hiac_stream_active_pri))
                    /*
                     * Set the new highest active
                     * priority.
                     */
                    ar->hiac_stream_active_pri =
                        ar->ac_stream_pri_map[i];
            }
        }
    }

    spin_unlock_bh(&ar->lock);

notify_htc:
    /* notify HTC, this may cause credit distribution changes */
    ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

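/*
 * HTC "send queue full" hook. Returning HTC_SEND_FULL_KEEP asks HTC to
 * queue the packet anyway, while HTC_SEND_FULL_DROP asks it to discard
 * the overflowing packet; independently, the netif queues may be stopped
 * here to throttle the stack.
 */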
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
                           struct htc_packet *packet)
{
    struct ath6kl *ar = target->dev->ar;
    struct ath6kl_vif *vif;
    enum htc_endpoint_id endpoint = packet->endpoint;
    enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

    if (endpoint == ar->ctrl_ep) {
        /*
         * Under normal WMI, if this is getting full then something
         * is running rampant; the host should not be exhausting the
         * WMI queue with too many commands. The only exception to
         * this is during testing using endpointping.
         */
        set_bit(WMI_CTRL_EP_FULL, &ar->flag);
        ath6kl_err("wmi ctrl ep is full\n");
        ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
        return action;
    }

    if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
        return action;

    /*
     * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
     * the highest active stream.
     */
    if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
        ar->hiac_stream_active_pri &&
        ar->cookie_count <=
            target->endpoint[endpoint].tx_drop_packet_threshold)
        /*
         * Give preference to the highest priority stream by
         * dropping the packets which overflowed.
         */
        action = HTC_SEND_FULL_DROP;

    /* FIXME: Locking */
    spin_lock_bh(&ar->list_lock);
    list_for_each_entry(vif, &ar->vif_list, list) {
        if (vif->nw_type == ADHOC_NETWORK ||
            action != HTC_SEND_FULL_DROP) {
            spin_unlock_bh(&ar->list_lock);

            set_bit(NETQ_STOPPED, &vif->flags);
            netif_stop_queue(vif->ndev);

            return action;
        }
    }
    spin_unlock_bh(&ar->list_lock);

    return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
                     enum htc_endpoint_id eid, u32 map_no)
{
    struct ath6kl *ar = vif->ar;
    u32 i;

    if (vif->nw_type != ADHOC_NETWORK)
        return;

    if (!ar->ibss_ps_enable)
        return;

    if (eid == ar->ctrl_ep)
        return;

    if (map_no == 0)
        return;

    map_no--;
    ar->node_map[map_no].tx_pend--;

    if (ar->node_map[map_no].tx_pend)
        return;

    if (map_no != (ar->node_num - 1))
        return;

    for (i = ar->node_num; i > 0; i--) {
        if (ar->node_map[i - 1].tx_pend)
            break;

        memset(&ar->node_map[i - 1], 0,
               sizeof(struct ath6kl_node_mapping));
        ar->node_num--;
    }
}

void ath6kl_tx_complete(struct htc_target *target,
            struct list_head *packet_queue)
{
    struct ath6kl *ar = target->dev->ar;
    struct sk_buff_head skb_queue;
    struct htc_packet *packet;
    struct sk_buff *skb;
    struct ath6kl_cookie *ath6kl_cookie;
    u32 map_no = 0;
    int status;
    enum htc_endpoint_id eid;
    bool wake_event = false;
    bool flushing[ATH6KL_VIF_MAX] = {false};
    u8 if_idx;
    struct ath6kl_vif *vif;

    skb_queue_head_init(&skb_queue);

    /* lock the driver as we update internal state */
    spin_lock_bh(&ar->lock);

    /* reap completed packets */
    while (!list_empty(packet_queue)) {
        packet = list_first_entry(packet_queue, struct htc_packet,
                      list);
        list_del(&packet->list);

        if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
                 packet->endpoint >= ENDPOINT_MAX))
            continue;

        ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
        if (WARN_ON_ONCE(!ath6kl_cookie))
            continue;

        status = packet->status;
        skb = ath6kl_cookie->skb;
        eid = packet->endpoint;
        map_no = ath6kl_cookie->map_no;

        if (WARN_ON_ONCE(!skb || !skb->data)) {
            dev_kfree_skb(skb);
            ath6kl_free_cookie(ar, ath6kl_cookie);
            continue;
        }

        __skb_queue_tail(&skb_queue, skb);

        if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
            ath6kl_free_cookie(ar, ath6kl_cookie);
            continue;
        }

        ar->tx_pending[eid]--;

        if (eid != ar->ctrl_ep)
            ar->total_tx_data_pend--;

        if (eid == ar->ctrl_ep) {
            if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
                clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

            if (ar->tx_pending[eid] == 0)
                wake_event = true;
        }

        if (eid == ar->ctrl_ep) {
            if_idx = wmi_cmd_hdr_get_if_idx(
                (struct wmi_cmd_hdr *) packet->buf);
        } else {
            if_idx = wmi_data_hdr_get_if_idx(
                (struct wmi_data_hdr *) packet->buf);
        }

        vif = ath6kl_get_vif_by_index(ar, if_idx);
        if (!vif) {
            ath6kl_free_cookie(ar, ath6kl_cookie);
            continue;
        }

        if (status) {
            if (status == -ECANCELED)
                /* a packet was flushed */
                flushing[if_idx] = true;

            vif->ndev->stats.tx_errors++;

            if (status != -ENOSPC && status != -ECANCELED)
                ath6kl_warn("tx complete error: %d\n", status);

            ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                   __func__, skb, packet->buf, packet->act_len,
                   eid, "error!");
        } else {
            ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                   __func__, skb, packet->buf, packet->act_len,
                   eid, "OK");

            flushing[if_idx] = false;
            vif->ndev->stats.tx_packets++;
            vif->ndev->stats.tx_bytes += skb->len;
        }

        ath6kl_tx_clear_node_map(vif, eid, map_no);

        ath6kl_free_cookie(ar, ath6kl_cookie);

        if (test_bit(NETQ_STOPPED, &vif->flags))
            clear_bit(NETQ_STOPPED, &vif->flags);
    }

    spin_unlock_bh(&ar->lock);

    __skb_queue_purge(&skb_queue);

    /* FIXME: Locking */
    spin_lock_bh(&ar->list_lock);
    list_for_each_entry(vif, &ar->vif_list, list) {
        if (test_bit(CONNECTED, &vif->flags) &&
            !flushing[vif->fw_vif_idx]) {
            spin_unlock_bh(&ar->list_lock);
            netif_wake_queue(vif->ndev);
            spin_lock_bh(&ar->list_lock);
        }
    }
    spin_unlock_bh(&ar->list_lock);

    if (wake_event)
        wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
    int i;

    /* flush all the data (non-control) streams */
    for (i = 0; i < WMM_NUM_AC; i++)
        ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
                      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
                          struct sk_buff *skb)
{
    if (!skb)
        return;

    skb->dev = dev;

    if (!(skb->dev->flags & IFF_UP)) {
        dev_kfree_skb(skb);
        return;
    }

    skb->protocol = eth_type_trans(skb, skb->dev);

    netif_rx(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
    struct sk_buff *skb;

    while (num) {
        skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
        if (!skb) {
            ath6kl_err("netbuf allocation failed\n");
            return;
        }
        skb_queue_tail(q, skb);
        num--;
    }
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
    struct sk_buff *skb = NULL;

    if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
        (AGGR_NUM_OF_FREE_NETBUFS >> 2))
        ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
                     AGGR_NUM_OF_FREE_NETBUFS);

    skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

    return skb;
}

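/*
 * Top up the RX buffer pool of an endpoint. The htc_packet bookkeeping is
 * kept in skb->head, and skb->data is nudged down onto a 4-byte boundary
 * since HTC expects word-aligned receive buffers.
 */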
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
    struct ath6kl *ar = target->dev->ar;
    struct sk_buff *skb;
    int rx_buf;
    int n_buf_refill;
    struct htc_packet *packet;
    struct list_head queue;

    n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
              ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

    if (n_buf_refill <= 0)
        return;

    INIT_LIST_HEAD(&queue);

    ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
           "%s: providing htc with %d buffers at eid=%d\n",
           __func__, n_buf_refill, endpoint);

    for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
        skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
        if (!skb)
            break;

        packet = (struct htc_packet *) skb->head;
        if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
            size_t len = skb_headlen(skb);
            skb->data = PTR_ALIGN(skb->data - 4, 4);
            skb_set_tail_pointer(skb, len);
        }
        set_htc_rxpkt_info(packet, skb, skb->data,
                   ATH6KL_BUFFER_SIZE, endpoint);
        packet->skb = skb;
        list_add_tail(&packet->list, &queue);
    }

    if (!list_empty(&queue))
        ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

0915 
0916 void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
0917 {
0918     struct htc_packet *packet;
0919     struct sk_buff *skb;
0920 
0921     while (count) {
0922         skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
0923         if (!skb)
0924             return;
0925 
0926         packet = (struct htc_packet *) skb->head;
0927         if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
0928             size_t len = skb_headlen(skb);
0929             skb->data = PTR_ALIGN(skb->data - 4, 4);
0930             skb_set_tail_pointer(skb, len);
0931         }
0932         set_htc_rxpkt_info(packet, skb, skb->data,
0933                    ATH6KL_AMSDU_BUFFER_SIZE, 0);
0934         packet->skb = skb;
0935 
0936         spin_lock_bh(&ar->lock);
0937         list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
0938         spin_unlock_bh(&ar->lock);
0939         count--;
0940     }
0941 }
0942 
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
                        enum htc_endpoint_id endpoint,
                        int len)
{
    struct ath6kl *ar = target->dev->ar;
    struct htc_packet *packet = NULL;
    struct list_head *pkt_pos;
    int refill_cnt = 0, depth = 0;

    ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
           __func__, endpoint, len);

    if ((len <= ATH6KL_BUFFER_SIZE) ||
        (len > ATH6KL_AMSDU_BUFFER_SIZE))
        return NULL;

    spin_lock_bh(&ar->lock);

    if (list_empty(&ar->amsdu_rx_buffer_queue)) {
        spin_unlock_bh(&ar->lock);
        refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
        goto refill_buf;
    }

    packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
                  struct htc_packet, list);
    list_del(&packet->list);
    list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
        depth++;

    refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
    spin_unlock_bh(&ar->lock);

    /* set actual endpoint ID */
    packet->endpoint = endpoint;

refill_buf:
    if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
        ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

    return packet;
}

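/*
 * Split a received A-MSDU into its 802.3 subframes, convert each back to
 * DIX and queue it on the TID reorder queue. Subframes are padded to
 * 4-byte boundaries, hence the ALIGN() when stepping to the next one.
 */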
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
                 struct rxtid *rxtid, struct sk_buff *skb)
{
    struct sk_buff *new_skb;
    struct ethhdr *hdr;
    u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
    u8 *framep;

    mac_hdr_len = sizeof(struct ethhdr);
    framep = skb->data + mac_hdr_len;
    amsdu_len = skb->len - mac_hdr_len;

    while (amsdu_len > mac_hdr_len) {
        hdr = (struct ethhdr *) framep;
        payload_8023_len = be16_to_cpu(hdr->h_proto);

        if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
            payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
            ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
                   payload_8023_len);
            break;
        }

        frame_8023_len = payload_8023_len + mac_hdr_len;
        new_skb = aggr_get_free_skb(p_aggr);
        if (!new_skb) {
            ath6kl_err("no buffer available\n");
            break;
        }

        memcpy(new_skb->data, framep, frame_8023_len);
        skb_put(new_skb, frame_8023_len);
        if (ath6kl_wmi_dot3_2_dix(new_skb)) {
            ath6kl_err("dot3_2_dix error\n");
            dev_kfree_skb(new_skb);
            break;
        }

        skb_queue_tail(&rxtid->q, new_skb);

        /* Is this the last subframe within this aggregate? */
        if ((amsdu_len - frame_8023_len) == 0)
            break;

        /* Add the length of A-MSDU subframe padding bytes -
         * round to nearest word.
         */
        frame_8023_len = ALIGN(frame_8023_len, 4);

        framep += frame_8023_len;
        amsdu_len -= frame_8023_len;
    }

    dev_kfree_skb(skb);
}

static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
                u16 seq_no, u8 order)
{
    struct sk_buff *skb;
    struct rxtid *rxtid;
    struct skb_hold_q *node;
    u16 idx, idx_end, seq_end;
    struct rxtid_stats *stats;

    rxtid = &agg_conn->rx_tid[tid];
    stats = &agg_conn->stat[tid];

    spin_lock_bh(&rxtid->lock);
    idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

    /*
     * idx_end is typically the last possible frame in the window,
     * but changes to the given seq_no when a BAR comes. If seq_no
     * is non-zero, we will go up to that and stop.
     * Note: the last seq no in the current window occupies the same
     * index position as the index just previous to the start.
     * An important point: if win_sz is 7, for a 4096-value seq_no
     * space there would be holes when sequence wrap-around occurs,
     * so the target should judiciously choose win_sz based on this
     * condition (power-of-two values such as 2, 4, 8 and 16 work
     * fine, TID_WINDOW_SZ being 2 x win_sz).
     * We must dequeue from "idx" to "idx_end", including both.
     */
    seq_end = seq_no ? seq_no : rxtid->seq_next;
    idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

    do {
        node = &rxtid->hold_q[idx];
        if ((order == 1) && (!node->skb))
            break;

        if (node->skb) {
            if (node->is_amsdu)
                aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
                         node->skb);
            else
                skb_queue_tail(&rxtid->q, node->skb);
            node->skb = NULL;
        } else {
            stats->num_hole++;
        }

        rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
        idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
    } while (idx != idx_end);

    spin_unlock_bh(&rxtid->lock);

    stats->num_delivered += skb_queue_len(&rxtid->q);

    while ((skb = skb_dequeue(&rxtid->q)))
        ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

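/*
 * A sketch of the reorder arithmetic, assuming the core.h definitions
 * AGGR_WIN_IDX(x, y) == (x) % (y) and hold_q_sz == TID_WINDOW_SZ(win_sz)
 * == 2 * win_sz: with win_sz 8, hold_q_sz is 16, so a frame with seq_no
 * 0x10a is parked in hold_q slot 0x10a % 16 = 10 until the window
 * catches up with it or the reorder timer flushes it.
 */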
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
                  u16 seq_no,
                  bool is_amsdu, struct sk_buff *frame)
{
    struct rxtid *rxtid;
    struct rxtid_stats *stats;
    struct sk_buff *skb;
    struct skb_hold_q *node;
    u16 idx, st, cur, end;
    bool is_queued = false;
    u16 extended_end;

    rxtid = &agg_conn->rx_tid[tid];
    stats = &agg_conn->stat[tid];

    stats->num_into_aggr++;

    if (!rxtid->aggr) {
        if (is_amsdu) {
            aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
            is_queued = true;
            stats->num_amsdu++;
            while ((skb = skb_dequeue(&rxtid->q)))
                ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
                                  skb);
        }
        return is_queued;
    }

    /* Check whether the incoming sequence number is within the window */
    st = rxtid->seq_next;
    cur = seq_no;
    end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

    if (((st < end) && (cur < st || cur > end)) ||
        ((st > end) && (cur > end) && (cur < st))) {
        extended_end = (end + rxtid->hold_q_sz - 1) &
            ATH6KL_MAX_SEQ_NO;

        if (((end < extended_end) &&
             (cur < end || cur > extended_end)) ||
            ((end > extended_end) && (cur > extended_end) &&
             (cur < end))) {
            aggr_deque_frms(agg_conn, tid, 0, 0);
            spin_lock_bh(&rxtid->lock);
            if (cur >= rxtid->hold_q_sz - 1)
                rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
            else
                rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
                          (rxtid->hold_q_sz - 2 - cur);
            spin_unlock_bh(&rxtid->lock);
        } else {
            /*
             * Dequeue only those frames that are outside the
             * new shifted window.
             */
            if (cur >= rxtid->hold_q_sz - 1)
                st = cur - (rxtid->hold_q_sz - 1);
            else
                st = ATH6KL_MAX_SEQ_NO -
                    (rxtid->hold_q_sz - 2 - cur);

            aggr_deque_frms(agg_conn, tid, st, 0);
        }

        stats->num_oow++;
    }

    idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

    node = &rxtid->hold_q[idx];

    spin_lock_bh(&rxtid->lock);

    /*
     * Is the current frame a duplicate, or something beyond our
     * window (hold_q -> which is 2x the window size, already)?
     *
     * 1. Duplicate is easy - drop the incoming frame.
     * 2. Not falling in the current sliding window:
     *  2a. Is the frame_seq_no preceding the current tid_seq_no?
     *      -> Drop the frame; perhaps the sender did not get our ACK.
     *         This is taken care of above.
     *  2b. Is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
     *      -> Taken care of above, by moving the window forward.
     */
    dev_kfree_skb(node->skb);
    stats->num_dups++;

    node->skb = frame;
    is_queued = true;
    node->is_amsdu = is_amsdu;
    node->seq_no = seq_no;

    if (node->is_amsdu)
        stats->num_amsdu++;
    else
        stats->num_mpdu++;

    spin_unlock_bh(&rxtid->lock);

    aggr_deque_frms(agg_conn, tid, 0, 1);

    if (agg_conn->timer_scheduled)
        return is_queued;

    spin_lock_bh(&rxtid->lock);
    for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
        if (rxtid->hold_q[idx].skb) {
            /*
             * There is a frame in the queue and no
             * timer so start a timer to ensure that
             * the frame doesn't remain stuck
             * forever.
             */
            agg_conn->timer_scheduled = true;
            mod_timer(&agg_conn->timer,
                  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
            rxtid->timer_mon = true;
            break;
        }
    }
    spin_unlock_bh(&rxtid->lock);

    return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
                         struct ath6kl_sta *conn)
{
    struct ath6kl *ar = vif->ar;
    bool is_apsdq_empty, is_apsdq_empty_at_start;
    u32 num_frames_to_deliver, flags;
    struct sk_buff *skb = NULL;

    /*
     * If the APSD q for this STA is not empty, dequeue and
     * send a pkt from the head of the q. Also update the
     * More data bit in the WMI_DATA_HDR if there are
     * more pkts for this STA in the APSD q.
     * If there are no more pkts for this STA,
     * update the APSD bitmap for this STA.
     */

    num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
                            ATH6KL_APSD_FRAME_MASK;
    /*
     * The number of frames to send in a service period is
     * indicated by the station in the QOS_INFO of the
     * association request. If it is zero, send all frames.
     */
    if (!num_frames_to_deliver)
        num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

    spin_lock_bh(&conn->psq_lock);
    is_apsdq_empty = skb_queue_empty(&conn->apsdq);
    spin_unlock_bh(&conn->psq_lock);
    is_apsdq_empty_at_start = is_apsdq_empty;

    while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
        spin_lock_bh(&conn->psq_lock);
        skb = skb_dequeue(&conn->apsdq);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * Set the STA flag to trigger delivery,
         * so that the frame will go out.
         */
        conn->sta_flags |= STA_PS_APSD_TRIGGER;
        num_frames_to_deliver--;

        /* Last frame in the service period, set EOSP or queue empty */
        if ((is_apsdq_empty) || (!num_frames_to_deliver))
            conn->sta_flags |= STA_PS_APSD_EOSP;

        ath6kl_data_tx(skb, vif->ndev);
        conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
        conn->sta_flags &= ~(STA_PS_APSD_EOSP);
    }

    if (is_apsdq_empty) {
        if (is_apsdq_empty_at_start)
            flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
        else
            flags = 0;

        ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                          vif->fw_vif_idx,
                          conn->aid, 0, flags);
    }
}

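/*
 * HTC RX completion handler. The skb is consumed on every path: handed to
 * WMI control processing, forwarded on the air (AP intra-BSS), parked in
 * the aggregation reorder queue, delivered to the network stack, or freed.
 */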
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
    struct ath6kl *ar = target->dev->ar;
    struct sk_buff *skb = packet->pkt_cntxt;
    struct wmi_rx_meta_v2 *meta;
    struct wmi_data_hdr *dhdr;
    int min_hdr_len;
    u8 meta_type, dot11_hdr = 0;
    u8 pad_before_data_start;
    int status = packet->status;
    enum htc_endpoint_id ept = packet->endpoint;
    bool is_amsdu, prev_ps, ps_state = false;
    bool trig_state = false;
    struct ath6kl_sta *conn = NULL;
    struct sk_buff *skb1 = NULL;
    struct ethhdr *datap = NULL;
    struct ath6kl_vif *vif;
    struct aggr_info_conn *aggr_conn;
    u16 seq_no, offset;
    u8 tid, if_idx;

    ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
           "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
           __func__, ar, ept, skb, packet->buf,
           packet->act_len, status);

    if (status || packet->act_len < HTC_HDR_LENGTH) {
        dev_kfree_skb(skb);
        return;
    }

    skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
    skb_pull(skb, HTC_HDR_LENGTH);

    ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
            skb->data, skb->len);

    if (ept == ar->ctrl_ep) {
        if (test_bit(WMI_ENABLED, &ar->flag)) {
            ath6kl_check_wow_status(ar);
            ath6kl_wmi_control_rx(ar->wmi, skb);
            return;
        }
        if_idx =
        wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
    } else {
        if_idx =
        wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
    }

    vif = ath6kl_get_vif_by_index(ar, if_idx);
    if (!vif) {
        dev_kfree_skb(skb);
        return;
    }

    /*
     * Take lock to protect buffer counts and adaptive power throughput
     * state.
     */
    spin_lock_bh(&vif->if_lock);

    vif->ndev->stats.rx_packets++;
    vif->ndev->stats.rx_bytes += packet->act_len;

    spin_unlock_bh(&vif->if_lock);

    skb->dev = vif->ndev;

    if (!test_bit(WMI_ENABLED, &ar->flag)) {
        if (EPPING_ALIGNMENT_PAD > 0)
            skb_pull(skb, EPPING_ALIGNMENT_PAD);
        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
        return;
    }

    ath6kl_check_wow_status(ar);

    min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
              sizeof(struct ath6kl_llc_snap_hdr);

    dhdr = (struct wmi_data_hdr *) skb->data;

    /*
     * In the case of AP mode we may receive NULL data frames
     * that do not have an LLC hdr. They are 16 bytes in size.
     * Allow these frames in the AP mode.
     */
    if (vif->nw_type != AP_NETWORK &&
        ((packet->act_len < min_hdr_len) ||
         (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
        ath6kl_info("frame len is too short or too long\n");
        vif->ndev->stats.rx_errors++;
        vif->ndev->stats.rx_length_errors++;
        dev_kfree_skb(skb);
        return;
    }

    pad_before_data_start =
        (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
            & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

    /* Get the Power save state of the STA */
    if (vif->nw_type == AP_NETWORK) {
        meta_type = wmi_data_hdr_get_meta(dhdr);

        ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
                  WMI_DATA_HDR_PS_MASK);

        offset = sizeof(struct wmi_data_hdr) + pad_before_data_start;
        trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

        switch (meta_type) {
        case 0:
            break;
        case WMI_META_VERSION_1:
            offset += sizeof(struct wmi_rx_meta_v1);
            break;
        case WMI_META_VERSION_2:
            offset += sizeof(struct wmi_rx_meta_v2);
            break;
        default:
            break;
        }

        datap = (struct ethhdr *) (skb->data + offset);
        conn = ath6kl_find_sta(vif, datap->h_source);

        if (!conn) {
            dev_kfree_skb(skb);
            return;
        }

        /*
         * If there is a change in PS state of the STA,
         * take appropriate steps:
         *
         * 1. If Sleep-->Awake, flush the psq for the STA
         *    and clear the PVB for the STA.
         * 2. If Awake-->Sleep, start queueing frames for
         *    the STA.
         */
        prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

        if (ps_state)
            conn->sta_flags |= STA_PS_SLEEP;
        else
            conn->sta_flags &= ~STA_PS_SLEEP;

        /* Accept trigger only when the station is in sleep */
        if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
            ath6kl_uapsd_trigger_frame_rx(vif, conn);

        if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
            if (!(conn->sta_flags & STA_PS_SLEEP)) {
                struct sk_buff *skbuff = NULL;
                bool is_apsdq_empty;
                struct ath6kl_mgmt_buff *mgmt;
                u8 idx;

                spin_lock_bh(&conn->psq_lock);
                while (conn->mgmt_psq_len > 0) {
                    mgmt = list_first_entry(
                            &conn->mgmt_psq,
                            struct ath6kl_mgmt_buff,
                            list);
                    list_del(&mgmt->list);
                    conn->mgmt_psq_len--;
                    spin_unlock_bh(&conn->psq_lock);
                    idx = vif->fw_vif_idx;

                    ath6kl_wmi_send_mgmt_cmd(ar->wmi,
                                 idx,
                                 mgmt->id,
                                 mgmt->freq,
                                 mgmt->wait,
                                 mgmt->buf,
                                 mgmt->len,
                                 mgmt->no_cck);

                    kfree(mgmt);
                    spin_lock_bh(&conn->psq_lock);
                }
                conn->mgmt_psq_len = 0;
                while ((skbuff = skb_dequeue(&conn->psq))) {
                    spin_unlock_bh(&conn->psq_lock);
                    ath6kl_data_tx(skbuff, vif->ndev);
                    spin_lock_bh(&conn->psq_lock);
                }

                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
                while ((skbuff = skb_dequeue(&conn->apsdq))) {
                    spin_unlock_bh(&conn->psq_lock);
                    ath6kl_data_tx(skbuff, vif->ndev);
                    spin_lock_bh(&conn->psq_lock);
                }
                spin_unlock_bh(&conn->psq_lock);

                if (!is_apsdq_empty)
                    ath6kl_wmi_set_apsd_bfrd_traf(
                            ar->wmi,
                            vif->fw_vif_idx,
                            conn->aid, 0, 0);

                /* Clear the PVB for this STA */
                ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
                               conn->aid, 0);
            }
        }

        /* drop NULL data frames here */
        if ((packet->act_len < min_hdr_len) ||
            (packet->act_len >
             WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
            dev_kfree_skb(skb);
            return;
        }
    }

    is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
    tid = wmi_data_hdr_get_up(dhdr);
    seq_no = wmi_data_hdr_get_seqno(dhdr);
    meta_type = wmi_data_hdr_get_meta(dhdr);
    dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

    skb_pull(skb, sizeof(struct wmi_data_hdr));

    switch (meta_type) {
    case WMI_META_VERSION_1:
        skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
        break;
    case WMI_META_VERSION_2:
        meta = (struct wmi_rx_meta_v2 *) skb->data;
        if (meta->csum_flags & 0x1) {
            skb->ip_summed = CHECKSUM_COMPLETE;
            skb->csum = (__force __wsum) meta->csum;
        }
        skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
        break;
    default:
        break;
    }

    skb_pull(skb, pad_before_data_start);

    if (dot11_hdr)
        status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
    else if (!is_amsdu)
        status = ath6kl_wmi_dot3_2_dix(skb);

    if (status) {
        /*
         * Drop frames that could not be processed (lack of
         * memory, etc.)
         */
        dev_kfree_skb(skb);
        return;
    }

    if (!(vif->ndev->flags & IFF_UP)) {
        dev_kfree_skb(skb);
        return;
    }

    if (vif->nw_type == AP_NETWORK) {
        datap = (struct ethhdr *) skb->data;
        if (is_multicast_ether_addr(datap->h_dest))
            /*
             * Bcast/Mcast frames should be sent to the
             * OS stack as well as on the air.
             */
            skb1 = skb_copy(skb, GFP_ATOMIC);
        else {
            /*
             * Search for a connected STA with dstMac
             * as the Mac address. If found send the
             * frame to it on the air else send the
             * frame up the stack.
             */
            conn = ath6kl_find_sta(vif, datap->h_dest);

            if (conn && ar->intra_bss) {
                skb1 = skb;
                skb = NULL;
            } else if (conn && !ar->intra_bss) {
                dev_kfree_skb(skb);
                skb = NULL;
            }
        }
        if (skb1)
            ath6kl_data_tx(skb1, vif->ndev);

        if (skb == NULL) {
            /* nothing to deliver up the stack */
            return;
        }
    }

    datap = (struct ethhdr *) skb->data;

    if (is_unicast_ether_addr(datap->h_dest)) {
        if (vif->nw_type == AP_NETWORK) {
            conn = ath6kl_find_sta(vif, datap->h_source);
            if (!conn)
                return;
            aggr_conn = conn->aggr_conn;
        } else {
            aggr_conn = vif->aggr_cntxt->aggr_conn;
        }

        if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
                      is_amsdu, skb)) {
            /* aggregation code will handle the skb */
            return;
        }
    } else if (!is_broadcast_ether_addr(datap->h_dest)) {
        vif->ndev->stats.multicast++;
    }

    ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

static void aggr_timeout(struct timer_list *t)
{
    u8 i, j;
    struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer);
    struct rxtid *rxtid;
    struct rxtid_stats *stats;

    for (i = 0; i < NUM_OF_TIDS; i++) {
        rxtid = &aggr_conn->rx_tid[i];
        stats = &aggr_conn->stat[i];

        if (!rxtid->aggr || !rxtid->timer_mon)
            continue;

        stats->num_timeouts++;
        ath6kl_dbg(ATH6KL_DBG_AGGR,
               "aggr timeout (st %d end %d)\n",
               rxtid->seq_next,
               ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
                ATH6KL_MAX_SEQ_NO));
        aggr_deque_frms(aggr_conn, i, 0, 0);
    }

    aggr_conn->timer_scheduled = false;

    for (i = 0; i < NUM_OF_TIDS; i++) {
        rxtid = &aggr_conn->rx_tid[i];

        if (rxtid->aggr && rxtid->hold_q) {
            spin_lock_bh(&rxtid->lock);
            for (j = 0; j < rxtid->hold_q_sz; j++) {
                if (rxtid->hold_q[j].skb) {
                    aggr_conn->timer_scheduled = true;
                    rxtid->timer_mon = true;
                    break;
                }
            }
            spin_unlock_bh(&rxtid->lock);

            if (j >= rxtid->hold_q_sz)
                rxtid->timer_mon = false;
        }
    }

    if (aggr_conn->timer_scheduled)
        mod_timer(&aggr_conn->timer,
              jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
    struct rxtid *rxtid;
    struct rxtid_stats *stats;

    if (!aggr_conn || tid >= NUM_OF_TIDS)
        return;

    rxtid = &aggr_conn->rx_tid[tid];
    stats = &aggr_conn->stat[tid];

    if (rxtid->aggr)
        aggr_deque_frms(aggr_conn, tid, 0, 0);

    rxtid->aggr = false;
    rxtid->timer_mon = false;
    rxtid->win_sz = 0;
    rxtid->seq_next = 0;
    rxtid->hold_q_sz = 0;

    kfree(rxtid->hold_q);
    rxtid->hold_q = NULL;

    memset(stats, 0, sizeof(struct rxtid_stats));
}

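/*
 * ADDBA request handling: (re)initialize the receive reorder state for a
 * TID. seq_next starts at the negotiated starting sequence number, and
 * the hold queue is sized to TID_WINDOW_SZ(win_sz) slots (twice the
 * negotiated window, per the 2x note in aggr_process_recv_frm) so that
 * out-of-window jumps can be absorbed before a flush.
 */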
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
                 u8 win_sz)
{
    struct ath6kl_sta *sta;
    struct aggr_info_conn *aggr_conn = NULL;
    struct rxtid *rxtid;
    u16 hold_q_size;
    u8 tid, aid;

    if (vif->nw_type == AP_NETWORK) {
        aid = ath6kl_get_aid(tid_mux);
        sta = ath6kl_find_sta_by_aid(vif->ar, aid);
        if (sta)
            aggr_conn = sta->aggr_conn;
    } else {
        aggr_conn = vif->aggr_cntxt->aggr_conn;
    }

    if (!aggr_conn)
        return;

    tid = ath6kl_get_tid(tid_mux);
    if (tid >= NUM_OF_TIDS)
        return;

    rxtid = &aggr_conn->rx_tid[tid];

    if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
        ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
               __func__, win_sz, tid);

    if (rxtid->aggr)
        aggr_delete_tid_state(aggr_conn, tid);

    rxtid->seq_next = seq_no;
    hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
    rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
    if (!rxtid->hold_q)
        return;

    rxtid->win_sz = win_sz;
    rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
    if (!skb_queue_empty(&rxtid->q))
        return;

    rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
            struct aggr_info_conn *aggr_conn)
{
    struct rxtid *rxtid;
    u8 i;

    aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
    aggr_conn->dev = vif->ndev;
    timer_setup(&aggr_conn->timer, aggr_timeout, 0);
    aggr_conn->aggr_info = aggr_info;

    aggr_conn->timer_scheduled = false;

    for (i = 0; i < NUM_OF_TIDS; i++) {
        rxtid = &aggr_conn->rx_tid[i];
        rxtid->aggr = false;
        rxtid->timer_mon = false;
        skb_queue_head_init(&rxtid->q);
        spin_lock_init(&rxtid->lock);
    }
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
    struct aggr_info *p_aggr = NULL;

    p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
    if (!p_aggr) {
        ath6kl_err("failed to alloc memory for aggr_node\n");
        return NULL;
    }

    p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
    if (!p_aggr->aggr_conn) {
        ath6kl_err("failed to alloc memory for connection specific aggr info\n");
        kfree(p_aggr);
        return NULL;
    }

    aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

    skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
    ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

    return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
    struct ath6kl_sta *sta;
    struct rxtid *rxtid;
    struct aggr_info_conn *aggr_conn = NULL;
    u8 tid, aid;

    if (vif->nw_type == AP_NETWORK) {
        aid = ath6kl_get_aid(tid_mux);
        sta = ath6kl_find_sta_by_aid(vif->ar, aid);
        if (sta)
            aggr_conn = sta->aggr_conn;
    } else {
        aggr_conn = vif->aggr_cntxt->aggr_conn;
    }

    if (!aggr_conn)
        return;

    tid = ath6kl_get_tid(tid_mux);
    if (tid >= NUM_OF_TIDS)
        return;

    rxtid = &aggr_conn->rx_tid[tid];

    if (rxtid->aggr)
        aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
    u8 tid;

    if (!aggr_conn)
        return;

    if (aggr_conn->timer_scheduled) {
        del_timer(&aggr_conn->timer);
        aggr_conn->timer_scheduled = false;
    }

    for (tid = 0; tid < NUM_OF_TIDS; tid++)
        aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
    struct htc_packet *packet, *tmp_pkt;

    spin_lock_bh(&ar->lock);
    if (list_empty(&ar->amsdu_rx_buffer_queue)) {
        spin_unlock_bh(&ar->lock);
        return;
    }

    list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
                 list) {
        list_del(&packet->list);
        spin_unlock_bh(&ar->lock);
        dev_kfree_skb(packet->pkt_cntxt);
        spin_lock_bh(&ar->lock);
    }

    spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
    if (!aggr_info)
        return;

    aggr_reset_state(aggr_info->aggr_conn);
    skb_queue_purge(&aggr_info->rx_amsdu_freeq);
    kfree(aggr_info->aggr_conn);
    kfree(aggr_info);
}