/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include "trace.h"

#include <asm/unaligned.h>

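/*
 * Round a transfer length up to the underlying interconnect's block
 * boundary (block_mask is the block size minus one, e.g. for SDIO
 * block mode), so every mbox transfer is a whole number of blocks.
 */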
#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))

static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
static void ath6kl_htc_mbox_stop(struct htc_target *target);
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
                          struct list_head *pkt_queue);
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
                       struct ath6kl_htc_credit_info *cred_info,
                       u16 svc_pri_order[], int len);

/* threshold to re-enable Tx bundling for an AC */
#define TX_RESUME_BUNDLE_THRESHOLD  1500

/* Functions for Tx credit handling */
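/* Move 'credits' from the free pool into the given endpoint's account. */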
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
                  struct htc_endpoint_credit_dist *ep_dist,
                  int credits)
{
    ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
           ep_dist->endpoint, credits);

    ep_dist->credits += credits;
    ep_dist->cred_assngd += credits;
    cred_info->cur_free_credits -= credits;
}

static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
                   struct list_head *ep_list,
                   int tot_credits)
{
    struct htc_endpoint_credit_dist *cur_ep_dist;
    int count;

    ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

    cred_info->cur_free_credits = tot_credits;
    cred_info->total_avail_credits = tot_credits;

    list_for_each_entry(cur_ep_dist, ep_list, list) {
        if (cur_ep_dist->endpoint == ENDPOINT_0)
            continue;

        cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

        if (tot_credits > 4) {
            if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
                (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
                ath6kl_credit_deposit(cred_info,
                              cur_ep_dist,
                              cur_ep_dist->cred_min);
                cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
            }
        }

        if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
            ath6kl_credit_deposit(cred_info, cur_ep_dist,
                          cur_ep_dist->cred_min);
            /*
             * Control service is always marked active, it
             * never goes inactive EVER.
             */
            cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
        }

        /*
         * Streams have to be created (explicit | implicit) for all
         * kinds of traffic. BE endpoints are also inactive in the
         * beginning. When BE traffic starts it creates implicit
         * streams that redistribute credits.
         *
         * Note: all other endpoints have minimums set but are
         * initially given NO credits. Credits will be distributed
         * as traffic activity demands.
         */
    }

    /*
     * ath6kl_credit_seek() uses list_for_each_entry_reverse() to walk
     * the whole ep list, so assign lowestpri_ep_dist only after the
     * ep_list has been walked.
     */
    cred_info->lowestpri_ep_dist = cur_ep_dist->list;

    WARN_ON(cred_info->cur_free_credits <= 0);

    list_for_each_entry(cur_ep_dist, ep_list, list) {
        if (cur_ep_dist->endpoint == ENDPOINT_0)
            continue;

        if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
            cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
        } else {
            /*
             * For the remaining data endpoints, we assume that
             * each cred_per_msg is the same. We use a simple
             * calculation here: take the remaining credits,
             * determine how many whole messages they can cover
             * and then set each endpoint's normal value equal
             * to 3/4 of that amount.
             */
            count = (cred_info->cur_free_credits /
                 cur_ep_dist->cred_per_msg)
                * cur_ep_dist->cred_per_msg;
            count = (count * 3) >> 2;
            count = max(count, cur_ep_dist->cred_per_msg);
            cur_ep_dist->cred_norm = count;
        }

        ath6kl_dbg(ATH6KL_DBG_CREDIT,
               "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
               cur_ep_dist->endpoint,
               cur_ep_dist->svc_id,
               cur_ep_dist->credits,
               cur_ep_dist->cred_per_msg,
               cur_ep_dist->cred_norm,
               cur_ep_dist->cred_min);
    }
}

/* initialize and setup credit distribution */
static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
                   struct ath6kl_htc_credit_info *cred_info)
{
    u16 servicepriority[5];

    memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

    servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
    servicepriority[1] = WMI_DATA_VO_SVC;
    servicepriority[2] = WMI_DATA_VI_SVC;
    servicepriority[3] = WMI_DATA_BE_SVC;
    servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

    /* set priority list */
    ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);

    return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
                 struct htc_endpoint_credit_dist *ep_dist,
                 int limit)
{
    int credits;

    ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
           ep_dist->endpoint, limit);

    ep_dist->cred_assngd = limit;

    if (ep_dist->credits <= limit)
        return;

    credits = ep_dist->credits - limit;
    ep_dist->credits -= credits;
    cred_info->cur_free_credits += credits;
}

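/*
 * Called on send completion: fold any credits pending in cred_to_dist
 * back into each endpoint, then trim endpoints holding more than their
 * assigned/normal share (or idle inactive ones) back down, returning
 * the surplus to the free pool.
 */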
static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
                 struct list_head *epdist_list)
{
    struct htc_endpoint_credit_dist *cur_list;

    list_for_each_entry(cur_list, epdist_list, list) {
        if (cur_list->endpoint == ENDPOINT_0)
            continue;

        if (cur_list->cred_to_dist > 0) {
            cur_list->credits += cur_list->cred_to_dist;
            cur_list->cred_to_dist = 0;

            if (cur_list->credits > cur_list->cred_assngd)
                ath6kl_credit_reduce(cred_info,
                             cur_list,
                             cur_list->cred_assngd);

            if (cur_list->credits > cur_list->cred_norm)
                ath6kl_credit_reduce(cred_info, cur_list,
                             cur_list->cred_norm);

            if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
                if (cur_list->txq_depth == 0)
                    ath6kl_credit_reduce(cred_info,
                                 cur_list, 0);
            }
        }
    }
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
                struct htc_endpoint_credit_dist *ep_dist)
{
    struct htc_endpoint_credit_dist *curdist_list;
    int credits = 0;
    int need;

    if (ep_dist->svc_id == WMI_CONTROL_SVC)
        goto out;

    if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
        (ep_dist->svc_id == WMI_DATA_VO_SVC))
        if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
            goto out;

    /*
     * For all other services, we follow a simple algorithm of:
     *
     * 1. checking the free pool for credits
     * 2. checking lower priority endpoints for credits to take
     */

    credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

    if (credits >= ep_dist->seek_cred)
        goto out;

    /*
     * We don't have enough in the free pool, try taking away from
     * lower priority services. The rules for taking away credits:
     *
     *   1. Only take from lower priority endpoints
     *   2. Only take what is allocated above the minimum (never
     *      starve an endpoint completely)
     *   3. Only take what you need.
     */

    list_for_each_entry_reverse(curdist_list,
                    &cred_info->lowestpri_ep_dist,
                    list) {
        if (curdist_list == ep_dist)
            break;

        need = ep_dist->seek_cred - cred_info->cur_free_credits;

        if ((curdist_list->cred_assngd - need) >=
             curdist_list->cred_min) {
            /*
             * The current one has been allocated more than
             * its minimum and it has enough credits assigned
             * above its minimum to fulfill our need; try to
             * take away just enough to fulfill our need.
             */
            ath6kl_credit_reduce(cred_info, curdist_list,
                         curdist_list->cred_assngd - need);

            if (cred_info->cur_free_credits >=
                ep_dist->seek_cred)
                break;
        }

        if (curdist_list->endpoint == ENDPOINT_0)
            break;
    }

    credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
    /* did we find some credits? */
    if (credits)
        ath6kl_credit_deposit(cred_info, ep_dist, credits);

    ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
                       struct list_head *ep_dist_list)
{
    struct htc_endpoint_credit_dist *curdist_list;

    list_for_each_entry(curdist_list, ep_dist_list, list) {
        if (curdist_list->endpoint == ENDPOINT_0)
            continue;

        if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
            (curdist_list->svc_id == WMI_DATA_BE_SVC))
            curdist_list->dist_flags |= HTC_EP_ACTIVE;

        if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
            !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
            if (curdist_list->txq_depth == 0)
                ath6kl_credit_reduce(info, curdist_list, 0);
            else
                ath6kl_credit_reduce(info,
                             curdist_list,
                             curdist_list->cred_min);
        }
    }
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked, this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() api.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
                     struct list_head *ep_dist_list,
                     enum htc_credit_dist_reason reason)
{
    switch (reason) {
    case HTC_CREDIT_DIST_SEND_COMPLETE:
        ath6kl_credit_update(cred_info, ep_dist_list);
        break;
    case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
        ath6kl_credit_redistribute(cred_info, ep_dist_list);
        break;
    default:
        break;
    }

    WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
    WARN_ON(cred_info->cur_free_credits < 0);
}

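/*
 * Align the buffer down to the previous 4-byte boundary by moving the
 * data up to 3 bytes toward lower addresses; the caller must have at
 * least 3 bytes of headroom below *buf for the memmove() to be safe.
 */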
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
    u8 *align_addr;

    if (!IS_ALIGNED((unsigned long) *buf, 4)) {
        align_addr = PTR_ALIGN(*buf - 4, 4);
        memmove(align_addr, *buf, len);
        *buf = align_addr;
    }
}

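/* Prepend the HTC frame header immediately before the payload in place. */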
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
                   int ctrl0, int ctrl1)
{
    struct htc_frame_hdr *hdr;

    packet->buf -= HTC_HDR_LENGTH;
    hdr = (struct htc_frame_hdr *)packet->buf;

    /* Endianness? */
    put_unaligned((u16)packet->act_len, &hdr->payld_len);
    hdr->flags = flags;
    hdr->eid = packet->endpoint;
    hdr->ctrl[0] = ctrl0;
    hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
                   struct htc_packet *pkt)
{
    spin_lock_bh(&target->htc_lock);
    list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
    spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
                          bool tx)
{
    struct htc_packet *packet = NULL;
    struct list_head *buf_list;

    buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

    spin_lock_bh(&target->htc_lock);

    if (list_empty(buf_list)) {
        spin_unlock_bh(&target->htc_lock);
        return NULL;
    }

    packet = list_first_entry(buf_list, struct htc_packet, list);
    list_del(&packet->list);
    spin_unlock_bh(&target->htc_lock);

    if (tx)
        packet->buf = packet->buf_start + HTC_HDR_LENGTH;

    return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
                   struct htc_endpoint *endpoint,
                   struct htc_packet *packet)
{
    packet->completion = NULL;
    packet->buf += HTC_HDR_LENGTH;

    if (!packet->status)
        return;

    ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
           packet->status, packet->endpoint, packet->act_len,
           packet->info.tx.cred_used);

    /* on failure to submit, reclaim credits for this packet */
    spin_lock_bh(&target->tx_lock);
    endpoint->cred_dist.cred_to_dist +=
                packet->info.tx.cred_used;
    endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

    ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
           target->credit_info, &target->cred_dist_list);

    ath6kl_credit_distribute(target->credit_info,
                 &target->cred_dist_list,
                 HTC_CREDIT_DIST_SEND_COMPLETE);

    spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
                struct list_head *txq)
{
    if (list_empty(txq))
        return;

    ath6kl_dbg(ATH6KL_DBG_HTC,
           "htc tx complete ep %d pkts %d\n",
           endpoint->eid, get_queue_depth(txq));

    ath6kl_tx_complete(endpoint->target, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
                struct htc_packet *packet)
{
    struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
    struct list_head container;

    ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
           packet->info.tx.seqno);

    htc_tx_comp_update(target, endpoint, packet);
    INIT_LIST_HEAD(&container);
    list_add_tail(&packet->list, &container);
    /* do completion */
    htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct htc_target *target,
                       struct hif_scatter_req *scat_req)
{
    struct htc_endpoint *endpoint;
    struct htc_packet *packet;
    struct list_head tx_compq;
    int i;

    INIT_LIST_HEAD(&tx_compq);

    ath6kl_dbg(ATH6KL_DBG_HTC,
           "htc tx scat complete len %d entries %d\n",
           scat_req->len, scat_req->scat_entries);

    if (scat_req->status)
        ath6kl_err("send scatter req failed: %d\n", scat_req->status);

    packet = scat_req->scat_list[0].packet;
    endpoint = &target->endpoint[packet->endpoint];

    /* walk through the scatter list and process */
    for (i = 0; i < scat_req->scat_entries; i++) {
        packet = scat_req->scat_list[i].packet;
        if (!packet) {
            WARN_ON(1);
            return;
        }

        packet->status = scat_req->status;
        htc_tx_comp_update(target, endpoint, packet);
        list_add_tail(&packet->list, &tx_compq);
    }

    /* free scatter request */
    hif_scatter_req_add(target->dev->ar, scat_req);

    /* complete all packets */
    htc_tx_complete(endpoint, &tx_compq);
}

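/*
 * Hand one prepared packet to the HIF layer: synchronously when the
 * packet has no completion callback, otherwise as an async mbox write.
 */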
static int ath6kl_htc_tx_issue(struct htc_target *target,
                   struct htc_packet *packet)
{
    int status;
    bool sync = false;
    u32 padded_len, send_len;

    if (!packet->completion)
        sync = true;

    send_len = packet->act_len + HTC_HDR_LENGTH;

    padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

    ath6kl_dbg(ATH6KL_DBG_HTC,
           "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
           send_len, packet->info.tx.seqno, padded_len,
           target->dev->ar->mbox_info.htc_addr,
           sync ? "sync" : "async");

    if (sync) {
        status = hif_read_write_sync(target->dev->ar,
                target->dev->ar->mbox_info.htc_addr,
                packet->buf, padded_len,
                HIF_WR_SYNC_BLOCK_INC);

        packet->status = status;
        packet->buf += HTC_HDR_LENGTH;
    } else {
        status = hif_write_async(target->dev->ar,
                target->dev->ar->mbox_info.htc_addr,
                packet->buf, padded_len,
                HIF_WR_ASYNC_BLOCK_INC, packet);
    }

    trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);

    return status;
}

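/*
 * Check whether the endpoint holds enough credits for a message of
 * 'len' bytes (one credit per tgt_cred_sz chunk); if not, try to seek
 * more, and flag the target for a credit update when running low.
 */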
static int htc_check_credits(struct htc_target *target,
                 struct htc_endpoint *ep, u8 *flags,
                 enum htc_endpoint_id eid, unsigned int len,
                 int *req_cred)
{
    *req_cred = (len > target->tgt_cred_sz) ?
             DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

    ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
           *req_cred, ep->cred_dist.credits);

    if (ep->cred_dist.credits < *req_cred) {
        if (eid == ENDPOINT_0)
            return -EINVAL;

        /* Seek more credits */
        ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

        ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

        ep->cred_dist.seek_cred = 0;

        if (ep->cred_dist.credits < *req_cred) {
            ath6kl_dbg(ATH6KL_DBG_CREDIT,
                   "credit not found for ep %d\n",
                   eid);
            return -EINVAL;
        }
    }

    ep->cred_dist.credits -= *req_cred;
    ep->ep_st.cred_cosumd += *req_cred;

    /* When we are getting low on credits, ask for more */
    if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
        ep->cred_dist.seek_cred =
            ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

        ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

        /* see if we were successful in getting more */
        if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
            /* tell the target we need credits ASAP! */
            *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
            ep->ep_st.cred_low_indicate += 1;
            ath6kl_dbg(ATH6KL_DBG_CREDIT,
                   "credit we need credits asap\n");
        }
    }

    return 0;
}

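/*
 * Move packets from the endpoint's TX queue onto the caller's queue
 * for as long as credits can be obtained for them.
 */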
static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
                   struct htc_endpoint *endpoint,
                   struct list_head *queue)
{
    int req_cred;
    u8 flags;
    struct htc_packet *packet;
    unsigned int len;

    while (true) {
        flags = 0;

        if (list_empty(&endpoint->txq))
            break;
        packet = list_first_entry(&endpoint->txq, struct htc_packet,
                      list);

        ath6kl_dbg(ATH6KL_DBG_HTC,
               "htc tx got packet 0x%p queue depth %d\n",
               packet, get_queue_depth(&endpoint->txq));

        len = CALC_TXRX_PADDED_LEN(target,
                       packet->act_len + HTC_HDR_LENGTH);

        if (htc_check_credits(target, endpoint, &flags,
                      packet->endpoint, len, &req_cred))
            break;

        /* now we can fully move onto caller's queue */
        packet = list_first_entry(&endpoint->txq, struct htc_packet,
                      list);
        list_move_tail(&packet->list, queue);

        /* save the number of credits this packet consumed */
        packet->info.tx.cred_used = req_cred;

        /* all TX packets are handled asynchronously */
        packet->completion = htc_tx_comp_handler;
        packet->context = target;
        endpoint->ep_st.tx_issued += 1;

        /* save send flags */
        packet->info.tx.flags = flags;
        packet->info.tx.seqno = endpoint->seqno;
        endpoint->seqno++;
    }
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
                  struct htc_endpoint *ep)
{
    int rem_cred, cred_pad;

    rem_cred = *len % cred_sz;

    /* No padding needed */
    if (!rem_cred)
        return 0;

    if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
        return -1;

    /*
     * The transfer consumes a "partial" credit, this
     * packet cannot be bundled unless we add
     * additional "dummy" padding (max 255 bytes) to
     * consume the entire credit.
     */
    cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

    if ((cred_pad > 0) && (cred_pad <= 255))
        *len += cred_pad;
    else
        /* The amount of padding is too large, send as non-bundled */
        return -1;

    return cred_pad;
}

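/*
 * Fill up to n_scat entries of a HIF scatter request from the pending
 * queue, stopping when the bundle buffer is full or a packet would
 * need too much credit padding; rolls everything back if fewer than
 * HTC_MIN_HTC_MSGS_TO_BUNDLE packets could be gathered.
 */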
static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
                     struct htc_endpoint *endpoint,
                     struct hif_scatter_req *scat_req,
                     int n_scat,
                     struct list_head *queue)
{
    struct htc_packet *packet;
    int i, len, rem_scat, cred_pad;
    int status = 0;
    u8 flags;

    rem_scat = target->max_tx_bndl_sz;

    for (i = 0; i < n_scat; i++) {
        scat_req->scat_list[i].packet = NULL;

        if (list_empty(queue))
            break;

        packet = list_first_entry(queue, struct htc_packet, list);
        len = CALC_TXRX_PADDED_LEN(target,
                       packet->act_len + HTC_HDR_LENGTH);

        cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
                          &len, endpoint);
        if (cred_pad < 0 || rem_scat < len) {
            status = -ENOSPC;
            break;
        }

        rem_scat -= len;
        /* now remove it from the queue */
        list_del(&packet->list);

        scat_req->scat_list[i].packet = packet;
        /* prepare packet and flag message as part of a send bundle */
        flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
        ath6kl_htc_tx_prep_pkt(packet, flags,
                       cred_pad, packet->info.tx.seqno);
        /* Make sure the buffer is 4-byte aligned */
        ath6kl_htc_tx_buf_align(&packet->buf,
                    packet->act_len + HTC_HDR_LENGTH);
        scat_req->scat_list[i].buf = packet->buf;
        scat_req->scat_list[i].len = len;

        scat_req->len += len;
        scat_req->scat_entries++;
        ath6kl_dbg(ATH6KL_DBG_HTC,
               "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
               i, packet, packet->info.tx.seqno, len, rem_scat);
    }

    /* Roll back scatter setup in case of any failure */
    if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
        for (i = scat_req->scat_entries - 1; i >= 0; i--) {
            packet = scat_req->scat_list[i].packet;
            if (packet) {
                packet->buf += HTC_HDR_LENGTH;
                list_add(&packet->list, queue);
            }
        }
        return -EAGAIN;
    }

    return status;
}

/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 *    1. scatter resources are exhausted
 *    2. a message that will consume a partial credit will stop the
 *       bundling process early
 *    3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
                 struct list_head *queue,
                 int *sent_bundle, int *n_bundle_pkts)
{
    struct htc_target *target = endpoint->target;
    struct hif_scatter_req *scat_req = NULL;
    int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
    struct htc_packet *packet;
    int status;
    u32 txb_mask;
    u8 ac = WMM_NUM_AC;

    if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
        (WMI_CONTROL_SVC != endpoint->svc_id))
        ac = target->dev->ar->ep2ac_map[endpoint->eid];

    while (true) {
        status = 0;
        n_scat = get_queue_depth(queue);
        n_scat = min(n_scat, target->msg_per_bndl_max);

        if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
            /* not enough to bundle */
            break;

        scat_req = hif_scatter_req_get(target->dev->ar);

        if (!scat_req) {
            /* no scatter resources */
            ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc tx no more scatter resources\n");
            break;
        }

        if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
            if (WMM_AC_BE == ac)
                /*
                 * BE, BK have priorities and bit
                 * positions reversed
                 */
                txb_mask = (1 << WMM_AC_BK);
            else
                /*
                 * any AC with priority lower than
                 * itself
                 */
                txb_mask = ((1 << ac) - 1);

            /*
             * When the scatter request resources drop below a
             * certain threshold, disable Tx bundling for all
             * ACs with priority lower than the current requesting
             * AC. Otherwise re-enable Tx bundling for them.
             */
            if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
                target->tx_bndl_mask &= ~txb_mask;
            else
                target->tx_bndl_mask |= txb_mask;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
               n_scat);

        scat_req->len = 0;
        scat_req->scat_entries = 0;

        status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
                               scat_req, n_scat,
                               queue);
        if (status == -EAGAIN) {
            hif_scatter_req_add(target->dev->ar, scat_req);
            break;
        }

        /* send path is always asynchronous */
        scat_req->complete = htc_async_tx_scat_complete;
        n_sent_bundle++;
        tot_pkts_bundle += scat_req->scat_entries;

        ath6kl_dbg(ATH6KL_DBG_HTC,
               "htc tx scatter bytes %d entries %d\n",
               scat_req->len, scat_req->scat_entries);

        for (i = 0; i < scat_req->scat_entries; i++) {
            packet = scat_req->scat_list[i].packet;
            trace_ath6kl_htc_tx(packet->status, packet->endpoint,
                        packet->buf, packet->act_len);
        }

        ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

        if (status)
            break;
    }

    *sent_bundle = n_sent_bundle;
    *n_bundle_pkts = tot_pkts_bundle;
    ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
           n_sent_bundle);
}

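/*
 * Service an endpoint's TX queue: only one context may run this at a
 * time (tx_proc_cnt guards re-entry); packets are sent as bundles when
 * possible, otherwise issued individually.
 */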
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
                     struct htc_endpoint *endpoint)
{
    struct list_head txq;
    struct htc_packet *packet;
    int bundle_sent;
    int n_pkts_bundle;
    u8 ac = WMM_NUM_AC;
    int status;

    spin_lock_bh(&target->tx_lock);

    endpoint->tx_proc_cnt++;
    if (endpoint->tx_proc_cnt > 1) {
        endpoint->tx_proc_cnt--;
        spin_unlock_bh(&target->tx_lock);
        ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
        return;
    }

    /*
     * drain the endpoint TX queue for transmission as long
     * as we have enough credits.
     */
    INIT_LIST_HEAD(&txq);

    if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
        (WMI_CONTROL_SVC != endpoint->svc_id))
        ac = target->dev->ar->ep2ac_map[endpoint->eid];

    while (true) {
        if (list_empty(&endpoint->txq))
            break;

        ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

        if (list_empty(&txq))
            break;

        spin_unlock_bh(&target->tx_lock);

        bundle_sent = 0;
        n_pkts_bundle = 0;

        while (true) {
            /* try to send a bundle on each pass */
            if ((target->tx_bndl_mask) &&
                (get_queue_depth(&txq) >=
                HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                int temp1 = 0, temp2 = 0;

                /* check if bundling is enabled for an AC */
                if (target->tx_bndl_mask & (1 << ac)) {
                    ath6kl_htc_tx_bundle(endpoint, &txq,
                                 &temp1, &temp2);
                    bundle_sent += temp1;
                    n_pkts_bundle += temp2;
                }
            }

            if (list_empty(&txq))
                break;

            packet = list_first_entry(&txq, struct htc_packet,
                          list);
            list_del(&packet->list);

            ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
                           0, packet->info.tx.seqno);
            status = ath6kl_htc_tx_issue(target, packet);

            if (status) {
                packet->status = status;
                packet->completion(packet->context, packet);
            }
        }

        spin_lock_bh(&target->tx_lock);

        endpoint->ep_st.tx_bundles += bundle_sent;
        endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

        /*
         * If an AC has bundling disabled and no tx bundling
         * has occurred continuously for a certain number of TX
         * passes, re-enable tx bundling for this AC.
         */
        if (!bundle_sent) {
            if (!(target->tx_bndl_mask & (1 << ac)) &&
                (ac < WMM_NUM_AC)) {
                if (++target->ac_tx_count[ac] >=
                    TX_RESUME_BUNDLE_THRESHOLD) {
                    target->ac_tx_count[ac] = 0;
                    target->tx_bndl_mask |= (1 << ac);
                }
            }
        } else {
            /* tx bundling will reset the counter */
            if (ac < WMM_NUM_AC)
                target->ac_tx_count[ac] = 0;
        }
    }

    endpoint->tx_proc_cnt = 0;
    spin_unlock_bh(&target->tx_lock);
}

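/*
 * Queue one packet for transmission, honouring the endpoint's depth
 * limit; on overflow the endpoint's tx_full callback may elect to
 * drop the packet.
 */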
static bool ath6kl_htc_tx_try(struct htc_target *target,
                  struct htc_endpoint *endpoint,
                  struct htc_packet *tx_pkt)
{
    struct htc_ep_callbacks ep_cb;
    int txq_depth;
    bool overflow = false;

    ep_cb = endpoint->ep_cb;

    spin_lock_bh(&target->tx_lock);
    txq_depth = get_queue_depth(&endpoint->txq);
    spin_unlock_bh(&target->tx_lock);

    if (txq_depth >= endpoint->max_txq_depth)
        overflow = true;

    if (overflow)
        ath6kl_dbg(ATH6KL_DBG_HTC,
               "htc tx overflow ep %d depth %d max %d\n",
               endpoint->eid, txq_depth,
               endpoint->max_txq_depth);

    if (overflow && ep_cb.tx_full) {
        if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
            HTC_SEND_FULL_DROP) {
            endpoint->ep_st.tx_dropped += 1;
            return false;
        }
    }

    spin_lock_bh(&target->tx_lock);
    list_add_tail(&tx_pkt->list, &endpoint->txq);
    spin_unlock_bh(&target->tx_lock);

    ath6kl_htc_tx_from_queue(target, endpoint);

    return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
    struct htc_endpoint *endpoint;
    struct htc_endpoint_credit_dist *cred_dist;

    /*
     * Run through the credit distribution list to see if there are
     * packets queued. NOTE: no locks need to be taken since the
     * distribution list is not dynamic (cannot be re-ordered) and we
     * are not modifying any state.
     */
    list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
        endpoint = cred_dist->htc_ep;

        spin_lock_bh(&target->tx_lock);
        if (!list_empty(&endpoint->txq)) {
            ath6kl_dbg(ATH6KL_DBG_HTC,
                   "htc creds ep %d credits %d pkts %d\n",
                   cred_dist->endpoint,
                   endpoint->cred_dist.credits,
                   get_queue_depth(&endpoint->txq));
            spin_unlock_bh(&target->tx_lock);
            /*
             * Try to start the stalled queue, this list is
             * ordered by priority. If there are credits
             * available the highest priority queue will get a
             * chance to reclaim credits from lower priority
             * ones.
             */
            ath6kl_htc_tx_from_queue(target, endpoint);
            spin_lock_bh(&target->tx_lock);
        }
        spin_unlock_bh(&target->tx_lock);
    }
}

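/*
 * Send the HTC "setup complete" control message on endpoint 0,
 * advertising RX bundling to the target when supported (HTC v2.1+).
 */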
static int htc_setup_tx_complete(struct htc_target *target)
{
    struct htc_packet *send_pkt = NULL;
    int status;

    send_pkt = htc_get_control_buf(target, true);

    if (!send_pkt)
        return -ENOMEM;

    if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
        struct htc_setup_comp_ext_msg *setup_comp_ext;
        u32 flags = 0;

        setup_comp_ext =
            (struct htc_setup_comp_ext_msg *)send_pkt->buf;
        memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
        setup_comp_ext->msg_id =
            cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

        if (target->msg_per_bndl_max > 0) {
            /* Indicate HTC bundling to the target */
            flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
            setup_comp_ext->msg_per_rxbndl =
                        target->msg_per_bndl_max;
        }

        memcpy(&setup_comp_ext->flags, &flags,
               sizeof(setup_comp_ext->flags));
        set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
                 sizeof(struct htc_setup_comp_ext_msg),
                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

    } else {
        struct htc_setup_comp_msg *setup_comp;

        setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
        memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
        setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
        set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
                 sizeof(struct htc_setup_comp_msg),
                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
    }

    /* we want synchronous operation */
    send_pkt->completion = NULL;
    ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
    status = ath6kl_htc_tx_issue(target, send_pkt);
    htc_reclaim_txctrl_buf(target, send_pkt);

    return status;
}

static void ath6kl_htc_set_credit_dist(struct htc_target *target,
                struct ath6kl_htc_credit_info *credit_info,
                u16 srvc_pri_order[], int list_len)
{
    struct htc_endpoint *endpoint;
    int i, ep;

    target->credit_info = credit_info;

    list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
              &target->cred_dist_list);

    for (i = 0; i < list_len; i++) {
        for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
            endpoint = &target->endpoint[ep];
            if (endpoint->svc_id == srvc_pri_order[i]) {
                list_add_tail(&endpoint->cred_dist.list,
                          &target->cred_dist_list);
                break;
            }
        }
        if (ep >= ENDPOINT_MAX) {
            WARN_ON(1);
            return;
        }
    }
}

static int ath6kl_htc_mbox_tx(struct htc_target *target,
                  struct htc_packet *packet)
{
    struct htc_endpoint *endpoint;
    struct list_head queue;

    ath6kl_dbg(ATH6KL_DBG_HTC,
           "htc tx ep id %d buf 0x%p len %d\n",
           packet->endpoint, packet->buf, packet->act_len);

    if (packet->endpoint >= ENDPOINT_MAX) {
        WARN_ON(1);
        return -EINVAL;
    }

    endpoint = &target->endpoint[packet->endpoint];

    if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
        packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
                 -ECANCELED : -ENOSPC;
        INIT_LIST_HEAD(&queue);
        list_add(&packet->list, &queue);
        htc_tx_complete(endpoint, &queue);
    }

    return 0;
}

/* flush endpoint TX queue */
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
               enum htc_endpoint_id eid, u16 tag)
{
    struct htc_packet *packet, *tmp_pkt;
    struct list_head discard_q, container;
    struct htc_endpoint *endpoint = &target->endpoint[eid];

    if (!endpoint->svc_id) {
        WARN_ON(1);
        return;
    }

    /* initialize the discard queue */
    INIT_LIST_HEAD(&discard_q);

    spin_lock_bh(&target->tx_lock);

    list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
        if ((tag == HTC_TX_PACKET_TAG_ALL) ||
            (tag == packet->info.tx.tag))
            list_move_tail(&packet->list, &discard_q);
    }

    spin_unlock_bh(&target->tx_lock);

    list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
        packet->status = -ECANCELED;
        list_del(&packet->list);
        ath6kl_dbg(ATH6KL_DBG_HTC,
               "htc tx flushing pkt 0x%p len %d  ep %d tag 0x%x\n",
               packet, packet->act_len,
               packet->endpoint, packet->info.tx.tag);

        INIT_LIST_HEAD(&container);
        list_add_tail(&packet->list, &container);
        htc_tx_complete(endpoint, &container);
    }
}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
    struct htc_endpoint *endpoint;
    int i;

    dump_cred_dist_stats(target);

    for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
        endpoint = &target->endpoint[i];
        if (endpoint->svc_id == 0)
            /* not in use.. */
            continue;
        ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
    }
}

static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
                         enum htc_endpoint_id eid,
                         bool active)
{
    struct htc_endpoint *endpoint = &target->endpoint[eid];
    bool dist = false;

    if (endpoint->svc_id == 0) {
        WARN_ON(1);
        return;
    }

    spin_lock_bh(&target->tx_lock);

    if (active) {
        if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
            endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
            dist = true;
        }
    } else {
        if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
            endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
            dist = true;
        }
    }

    if (dist) {
        endpoint->cred_dist.txq_depth =
            get_queue_depth(&endpoint->txq);

        ath6kl_dbg(ATH6KL_DBG_HTC,
               "htc tx activity ctxt 0x%p dist 0x%p\n",
               target->credit_info, &target->cred_dist_list);

        ath6kl_credit_distribute(target->credit_info,
                     &target->cred_dist_list,
                     HTC_CREDIT_DIST_ACTIVITY_CHANGE);
    }

    spin_unlock_bh(&target->tx_lock);

    if (dist && !active)
        htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
                          int n_look_ahds)
{
    endpoint->ep_st.rx_pkts++;
    if (n_look_ahds == 1)
        endpoint->ep_st.rx_lkahds++;
    else if (n_look_ahds > 1)
        endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
                      enum htc_endpoint_id eid, int len)
{
    return (eid == target->dev->ar->ctrl_ep) ?
        len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
    struct list_head queue;

    INIT_LIST_HEAD(&queue);
    list_add_tail(&packet->list, &queue);
    return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
                  struct htc_packet *packet,
                  struct htc_endpoint *ep)
{
    if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
        htc_rxpkt_reset(packet);
        packet->status = -ECANCELED;
        ep->ep_cb.rx(ep->target, packet);
    } else {
        htc_rxpkt_reset(packet);
        htc_add_rxbuf((void *)(target), packet);
    }
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
                struct htc_packet *packet)
{
    spin_lock_bh(&target->htc_lock);
    list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
    spin_unlock_bh(&target->htc_lock);
}

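/*
 * Synchronously fetch one padded frame from the target's mbox into the
 * packet buffer, failing if the padded length exceeds the buffer.
 */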
static int ath6kl_htc_rx_packet(struct htc_target *target,
                struct htc_packet *packet,
                u32 rx_len)
{
    struct ath6kl_device *dev = target->dev;
    u32 padded_len;
    int status;

    padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

    if (padded_len > packet->buf_len) {
        ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
               padded_len, rx_len, packet->buf_len);
        return -ENOMEM;
    }

    ath6kl_dbg(ATH6KL_DBG_HTC,
           "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
           packet, packet->info.rx.exp_hdr,
           padded_len, dev->ar->mbox_info.htc_addr);

    status = hif_read_write_sync(dev->ar,
                     dev->ar->mbox_info.htc_addr,
                     packet->buf, padded_len,
                     HIF_RD_SYNC_BLOCK_FIX);

    packet->status = status;

    return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
                       struct htc_endpoint *endpoint,
                       struct htc_packet *packet)
{
    struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

    if (htc_hdr->eid == packet->endpoint) {
        if (!list_empty(&endpoint->rx_bufq))
            packet->info.rx.indicat_flags |=
                    HTC_RX_FLAGS_INDICATE_MORE_PKTS;
    }
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
    struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

    if (ep_cb.rx_refill_thresh > 0) {
        spin_lock_bh(&endpoint->target->rx_lock);
        if (get_queue_depth(&endpoint->rx_bufq)
            < ep_cb.rx_refill_thresh) {
            spin_unlock_bh(&endpoint->target->rx_lock);
            ep_cb.rx_refill(endpoint->target, endpoint->eid);
            return;
        }
        spin_unlock_bh(&endpoint->target->rx_lock);
    }
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
                   struct htc_endpoint *ep,
                   u32 *lk_ahds, struct list_head *queue, int n_msg)
{
    struct htc_packet *packet;
    /* FIXME: type of lk_ahds can't be right */
    struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
    struct htc_ep_callbacks ep_cb;
    int status = 0, j, full_len;
    bool no_recycle;

    full_len = CALC_TXRX_PADDED_LEN(target,
                    le16_to_cpu(htc_hdr->payld_len) +
                    sizeof(*htc_hdr));

    if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
        ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
                htc_hdr->eid, htc_hdr->flags,
                le16_to_cpu(htc_hdr->payld_len));
        return -EINVAL;
    }

    ep_cb = ep->ep_cb;
    for (j = 0; j < n_msg; j++) {
        /*
         * Reset flag, any packets allocated using the
         * rx_alloc() API cannot be recycled on
         * cleanup, they must be explicitly returned.
         */
        no_recycle = false;

        if (ep_cb.rx_allocthresh &&
            (full_len > ep_cb.rx_alloc_thresh)) {
            ep->ep_st.rx_alloc_thresh_hit += 1;
            ep->ep_st.rxalloc_thresh_byte +=
                le16_to_cpu(htc_hdr->payld_len);

            spin_unlock_bh(&target->rx_lock);
            no_recycle = true;

            packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
                              full_len);
            spin_lock_bh(&target->rx_lock);
        } else {
            /* refill handler is being used */
            if (list_empty(&ep->rx_bufq)) {
                if (ep_cb.rx_refill) {
                    spin_unlock_bh(&target->rx_lock);
                    ep_cb.rx_refill(ep->target, ep->eid);
                    spin_lock_bh(&target->rx_lock);
                }
            }

            if (list_empty(&ep->rx_bufq)) {
                packet = NULL;
            } else {
                packet = list_first_entry(&ep->rx_bufq,
                        struct htc_packet, list);
                list_del(&packet->list);
            }
        }

        if (!packet) {
            target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
            target->ep_waiting = ep->eid;
            return -ENOSPC;
        }

        /* clear flags */
        packet->info.rx.rx_flags = 0;
        packet->info.rx.indicat_flags = 0;
        packet->status = 0;

        if (no_recycle)
            /*
             * flag that these packets cannot be
             * recycled, they have to be returned to
             * the user
             */
            packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

        /* Caller needs to free this upon any failure */
        list_add_tail(&packet->list, queue);

        if (target->htc_flags & HTC_OP_STATE_STOPPING) {
            status = -ECANCELED;
            break;
        }

        if (j) {
            packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
            packet->info.rx.exp_hdr = 0xFFFFFFFF;
        } else {
            /* set expected look ahead */
            packet->info.rx.exp_hdr = *lk_ahds;
        }

        packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
            HTC_HDR_LENGTH;
    }

    return status;
}

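/*
 * Validate the look-ahead headers and allocate receive buffers for the
 * message (or bundle of messages) each one describes.
 */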
1469 static int ath6kl_htc_rx_alloc(struct htc_target *target,
1470                    u32 lk_ahds[], int msg,
1471                    struct htc_endpoint *endpoint,
1472                    struct list_head *queue)
1473 {
1474     int status = 0;
1475     struct htc_packet *packet, *tmp_pkt;
1476     struct htc_frame_hdr *htc_hdr;
1477     int i, n_msg;
1478 
1479     spin_lock_bh(&target->rx_lock);
1480 
1481     for (i = 0; i < msg; i++) {
1482         htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1483 
1484         if (htc_hdr->eid >= ENDPOINT_MAX) {
1485             ath6kl_err("invalid ep in look-ahead: %d\n",
1486                    htc_hdr->eid);
1487             status = -ENOMEM;
1488             break;
1489         }
1490 
1491         if (htc_hdr->eid != endpoint->eid) {
1492             ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1493                    htc_hdr->eid, endpoint->eid, i);
1494             status = -ENOMEM;
1495             break;
1496         }
1497 
1498         if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
1499             ath6kl_err("payload len %d exceeds max htc : %d !\n",
1500                    htc_hdr->payld_len,
1501                    (u32) HTC_MAX_PAYLOAD_LENGTH);
1502             status = -ENOMEM;
1503             break;
1504         }
1505 
1506         if (endpoint->svc_id == 0) {
1507             ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
1508             status = -ENOMEM;
1509             break;
1510         }
1511 
1512         if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
1513             /*
1514              * HTC header indicates that every packet to follow
1515              * has the same padded length so that it can be
1516              * optimally fetched as a full bundle.
1517              */
1518             n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
1519                 HTC_FLG_RX_BNDL_CNT_S;
1520 
1521             /* the count doesn't include the starter frame */
1522             n_msg++;
1523             if (n_msg > target->msg_per_bndl_max) {
1524                 status = -ENOMEM;
1525                 break;
1526             }
1527 
1528             endpoint->ep_st.rx_bundle_from_hdr += 1;
1529             ath6kl_dbg(ATH6KL_DBG_HTC,
1530                    "htc rx bundle pkts %d\n",
1531                    n_msg);
1532         } else
1533             /* HTC header only indicates 1 message to fetch */
1534             n_msg = 1;
1535 
1536         /* Setup packet buffers for each message */
1537         status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
1538                          queue, n_msg);
1539 
1540         /*
1541          * This is due to unavailability of buffers to rx entire data.
1542          * Return no error so that free buffers from queue can be used
1543          * to receive partial data.
1544          */
1545         if (status == -ENOSPC) {
1546             spin_unlock_bh(&target->rx_lock);
1547             return 0;
1548         }
1549 
1550         if (status)
1551             break;
1552     }
1553 
1554     spin_unlock_bh(&target->rx_lock);
1555 
1556     if (status) {
1557         list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
1558             list_del(&packet->list);
1559             htc_reclaim_rxbuf(target, packet,
1560                       &target->endpoint[packet->endpoint]);
1561         }
1562     }
1563 
1564     return status;
1565 }
1566 
1567 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1568 {
1569     if (packets->endpoint != ENDPOINT_0) {
1570         WARN_ON(1);
1571         return;
1572     }
1573 
1574     if (packets->status == -ECANCELED) {
1575         reclaim_rx_ctrl_buf(context, packets);
1576         return;
1577     }
1578 
1579     if (packets->act_len > 0) {
1580         ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1581                packets->act_len + HTC_HDR_LENGTH);
1582 
1583         ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1584                 "htc rx unexpected endpoint 0 message", "",
1585                 packets->buf - HTC_HDR_LENGTH,
1586                 packets->act_len + HTC_HDR_LENGTH);
1587     }
1588 
1589     htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1590 }
1591 
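/*
 * Process a credit report returned by the target. Credits are folded
 * back into each endpoint's distribution state: endpoint 0 keeps its
 * credits directly, while credits for other endpoints are handed to
 * the distribution function (with the tx lock held).
 */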
1592 static void htc_proc_cred_rpt(struct htc_target *target,
1593                   struct htc_credit_report *rpt,
1594                   int n_entries,
1595                   enum htc_endpoint_id from_ep)
1596 {
1597     struct htc_endpoint *endpoint;
1598     int tot_credits = 0, i;
1599     bool dist = false;
1600 
1601     spin_lock_bh(&target->tx_lock);
1602 
1603     for (i = 0; i < n_entries; i++, rpt++) {
1604         if (rpt->eid >= ENDPOINT_MAX) {
1605             WARN_ON(1);
1606             spin_unlock_bh(&target->tx_lock);
1607             return;
1608         }
1609 
1610         endpoint = &target->endpoint[rpt->eid];
1611 
1612         ath6kl_dbg(ATH6KL_DBG_CREDIT,
1613                "credit report ep %d credits %d\n",
1614                rpt->eid, rpt->credits);
1615 
1616         endpoint->ep_st.tx_cred_rpt += 1;
1617         endpoint->ep_st.cred_retnd += rpt->credits;
1618 
1619         if (from_ep == rpt->eid) {
1620             /*
1621              * This credit report arrived on the same endpoint
1622              * indicating it arrived in an RX packet.
1623              */
1624             endpoint->ep_st.cred_from_rx += rpt->credits;
1625             endpoint->ep_st.cred_rpt_from_rx += 1;
1626         } else if (from_ep == ENDPOINT_0) {
1627             /* credit arrived on endpoint 0 as a NULL message */
1628             endpoint->ep_st.cred_from_ep0 += rpt->credits;
1629             endpoint->ep_st.cred_rpt_ep0 += 1;
1630         } else {
1631             endpoint->ep_st.cred_from_other += rpt->credits;
1632             endpoint->ep_st.cred_rpt_from_other += 1;
1633         }
1634 
1635         if (rpt->eid == ENDPOINT_0)
1636             /* always give endpoint 0 credits back */
1637             endpoint->cred_dist.credits += rpt->credits;
1638         else {
1639             endpoint->cred_dist.cred_to_dist += rpt->credits;
1640             dist = true;
1641         }
1642 
1643         /*
1644          * Refresh the tx queue depth for the distribution function
1645          * that will recover these credits. NOTE: this is only valid
1646          * when there are credits to recover!
1647          */
1648         endpoint->cred_dist.txq_depth =
1649             get_queue_depth(&endpoint->txq);
1650 
1651         tot_credits += rpt->credits;
1652     }
1653 
1654     if (dist) {
1655         /*
1656          * This was a credit return based on completed send
1657          * operations. Note that this is done with the tx lock held.
1658          */
1659         ath6kl_credit_distribute(target->credit_info,
1660                      &target->cred_dist_list,
1661                      HTC_CREDIT_DIST_SEND_COMPLETE);
1662     }
1663 
1664     spin_unlock_bh(&target->tx_lock);
1665 
1666     if (tot_credits)
1667         htc_chk_ep_txq(target);
1668 }
1669 
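/*
 * Dispatch a single trailer record by its rec_id: a credit report, a
 * single lookahead report (validated via the pre_valid/post_valid
 * one's-complement check), or a bundle of lookahead reports. Unknown
 * record IDs are logged and skipped rather than treated as errors.
 */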
1670 static int htc_parse_trailer(struct htc_target *target,
1671                  struct htc_record_hdr *record,
1672                  u8 *record_buf, u32 *next_lk_ahds,
1673                  enum htc_endpoint_id endpoint,
1674                  int *n_lk_ahds)
1675 {
1676     struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
1677     struct htc_lookahead_report *lk_ahd;
1678     int len;
1679 
1680     switch (record->rec_id) {
1681     case HTC_RECORD_CREDITS:
1682         len = record->len / sizeof(struct htc_credit_report);
1683         if (!len) {
1684             WARN_ON(1);
1685             return -EINVAL;
1686         }
1687 
1688         htc_proc_cred_rpt(target,
1689                   (struct htc_credit_report *) record_buf,
1690                   len, endpoint);
1691         break;
1692     case HTC_RECORD_LOOKAHEAD:
1693         len = record->len / sizeof(*lk_ahd);
1694         if (!len) {
1695             WARN_ON(1);
1696             return -EINVAL;
1697         }
1698 
1699         lk_ahd = (struct htc_lookahead_report *) record_buf;
1700         if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
1701             next_lk_ahds) {
1702             ath6kl_dbg(ATH6KL_DBG_HTC,
1703                    "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1704                    lk_ahd->pre_valid, lk_ahd->post_valid);
1705 
1706             /* look ahead bytes are valid, copy them over */
1707             memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1708 
1709             ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1710                     "htc rx next look ahead",
1711                     "", next_lk_ahds, 4);
1712 
1713             *n_lk_ahds = 1;
1714         }
1715         break;
1716     case HTC_RECORD_LOOKAHEAD_BUNDLE:
1717         len = record->len / sizeof(*bundle_lkahd_rpt);
1718         if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
1719             WARN_ON(1);
1720             return -EINVAL;
1721         }
1722 
1723         if (next_lk_ahds) {
1724             int i;
1725 
1726             bundle_lkahd_rpt =
1727                 (struct htc_bundle_lkahd_rpt *) record_buf;
1728 
1729             ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
1730                     "", record_buf, record->len);
1731 
1732             for (i = 0; i < len; i++) {
1733                 memcpy((u8 *)&next_lk_ahds[i],
1734                        bundle_lkahd_rpt->lk_ahd, 4);
1735                 bundle_lkahd_rpt++;
1736             }
1737 
1738             *n_lk_ahds = i;
1739         }
1740         break;
1741     default:
1742         ath6kl_err("unhandled record: id:%d len:%d\n",
1743                record->rec_id, record->len);
1744         break;
1745     }
1746 
1747     return 0;
1748 }
1749 
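/*
 * Walk the trailer record by record. The layout, as consumed below,
 * is a packed sequence of byte-aligned records, each an
 * htc_record_hdr immediately followed by its payload:
 *
 *   | rec_id | len | payload (len bytes) | rec_id | len | ...
 */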
1750 static int htc_proc_trailer(struct htc_target *target,
1751                 u8 *buf, int len, u32 *next_lk_ahds,
1752                 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1753 {
1754     struct htc_record_hdr *record;
1755     int orig_len;
1756     int status;
1757     u8 *record_buf;
1758     u8 *orig_buf;
1759 
1760     ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
1761     ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
1762 
1763     orig_buf = buf;
1764     orig_len = len;
1765     status = 0;
1766 
1767     while (len > 0) {
1768         if (len < sizeof(struct htc_record_hdr)) {
1769             status = -ENOMEM;
1770             break;
1771         }
1772         /* these are byte aligned structs */
1773         record = (struct htc_record_hdr *) buf;
1774         len -= sizeof(struct htc_record_hdr);
1775         buf += sizeof(struct htc_record_hdr);
1776 
1777         if (record->len > len) {
1778             ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1779                    record->len, record->rec_id, len);
1780             status = -ENOMEM;
1781             break;
1782         }
1783         record_buf = buf;
1784 
1785         status = htc_parse_trailer(target, record, record_buf,
1786                        next_lk_ahds, endpoint, n_lk_ahds);
1787 
1788         if (status)
1789             break;
1790 
1791         /* advance buffer past this record for next time around */
1792         buf += record->len;
1793         len -= record->len;
1794     }
1795 
1796     if (status)
1797         ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
1798                 "", orig_buf, orig_len);
1799 
1800     return status;
1801 }
1802 
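/*
 * Validate and strip the HTC header of a received packet: the first
 * four bytes must match the expected header (the lookahead that
 * announced the packet, refreshed here for bundled receives), any
 * trailer of ctrl[0] bytes is processed and trimmed from act_len,
 * and buf/act_len are finally advanced past the header itself.
 */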
1803 static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1804                      struct htc_packet *packet,
1805                      u32 *next_lkahds, int *n_lkahds)
1806 {
1807     int status = 0;
1808     u16 payload_len;
1809     u32 lk_ahd;
1810     struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
1811 
1812     if (n_lkahds != NULL)
1813         *n_lkahds = 0;
1814 
1815     /*
1816      * NOTE: we cannot assume the alignment of buf, so we use the safe
1817      * macros to retrieve 16 bit fields.
1818      */
1819     payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1820 
1821     memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
1822 
1823     if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
1824         /*
1825          * Refresh the expected header and the actual length as it
1826          * was unknown when this packet was grabbed as part of the
1827          * bundle.
1828          */
1829         packet->info.rx.exp_hdr = lk_ahd;
1830         packet->act_len = payload_len + HTC_HDR_LENGTH;
1831 
1832         /* validate the actual header that was refreshed  */
1833         if (packet->act_len > packet->buf_len) {
1834             ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1835                    payload_len, lk_ahd);
1836             /*
1837              * Limit this to max buffer just to print out some
1838              * of the buffer.
1839              */
1840             packet->act_len = min(packet->act_len, packet->buf_len);
1841             status = -ENOMEM;
1842             goto fail_rx;
1843         }
1844 
1845         if (packet->endpoint != htc_hdr->eid) {
1846             ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1847                    htc_hdr->eid, packet->endpoint);
1848             status = -ENOMEM;
1849             goto fail_rx;
1850         }
1851     }
1852 
1853     if (lk_ahd != packet->info.rx.exp_hdr) {
1854         ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1855                __func__, packet, packet->info.rx.rx_flags);
1856         ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
1857                 "", &packet->info.rx.exp_hdr, 4);
1858         ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
1859                 "", (u8 *)&lk_ahd, sizeof(lk_ahd));
1860         status = -ENOMEM;
1861         goto fail_rx;
1862     }
1863 
1864     if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
1865         if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
1866             htc_hdr->ctrl[0] > payload_len) {
1867             ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1868                    __func__, payload_len, htc_hdr->ctrl[0]);
1869             status = -ENOMEM;
1870             goto fail_rx;
1871         }
1872 
1873         if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
1874             next_lkahds = NULL;
1875             n_lkahds = NULL;
1876         }
1877 
1878         status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
1879                       + payload_len - htc_hdr->ctrl[0],
1880                       htc_hdr->ctrl[0], next_lkahds,
1881                        n_lkahds, packet->endpoint);
1882 
1883         if (status)
1884             goto fail_rx;
1885 
1886         packet->act_len -= htc_hdr->ctrl[0];
1887     }
1888 
1889     packet->buf += HTC_HDR_LENGTH;
1890     packet->act_len -= HTC_HDR_LENGTH;
1891 
1892 fail_rx:
1893     if (status)
1894         ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
1895                 "", packet->buf, packet->act_len);
1896 
1897     return status;
1898 }
1899 
1900 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1901                    struct htc_packet *packet)
1902 {
1903     ath6kl_dbg(ATH6KL_DBG_HTC,
1904            "htc rx complete ep %d packet 0x%p\n",
1905            endpoint->eid, packet);
1906 
1907     endpoint->ep_cb.rx(endpoint->target, packet);
1908 }
1909 
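/*
 * Fetch a bundle of packets with a single scatter request, one HTC
 * packet per scatter entry. Packets that no longer fit within the
 * remaining bundle space (max_rx_bndl_sz) are pushed back onto rxq
 * for a later fetch.
 */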
1910 static int ath6kl_htc_rx_bundle(struct htc_target *target,
1911                 struct list_head *rxq,
1912                 struct list_head *sync_compq,
1913                 int *n_pkt_fetched, bool part_bundle)
1914 {
1915     struct hif_scatter_req *scat_req;
1916     struct htc_packet *packet;
1917     int rem_space = target->max_rx_bndl_sz;
1918     int n_scat_pkt, status = 0, i, len;
1919 
1920     n_scat_pkt = get_queue_depth(rxq);
1921     n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
1922 
1923     if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1924         /*
1925          * We were forced to split this bundle receive operation;
1926          * all packets in this partial bundle must have their
1927          * lookaheads ignored.
1928          */
1929         part_bundle = true;
1930 
1931         /*
1932          * This would only happen if the target ignored our max
1933          * bundle limit.
1934          */
1935         ath6kl_warn("%s(): partial bundle detected num:%d, %d\n",
1936                 __func__, get_queue_depth(rxq), n_scat_pkt);
1937     }
1938 
1939     len = 0;
1940 
1941     ath6kl_dbg(ATH6KL_DBG_HTC,
1942            "htc rx bundle depth %d pkts %d\n",
1943            get_queue_depth(rxq), n_scat_pkt);
1944 
1945     scat_req = hif_scatter_req_get(target->dev->ar);
1946 
1947     if (scat_req == NULL)
1948         goto fail_rx_pkt;
1949 
1950     for (i = 0; i < n_scat_pkt; i++) {
1951         int pad_len;
1952 
1953         packet = list_first_entry(rxq, struct htc_packet, list);
1954         list_del(&packet->list);
1955 
1956         pad_len = CALC_TXRX_PADDED_LEN(target,
1957                            packet->act_len);
1958 
1959         if ((rem_space - pad_len) < 0) {
1960             list_add(&packet->list, rxq);
1961             break;
1962         }
1963 
1964         rem_space -= pad_len;
1965 
1966         if (part_bundle || (i < (n_scat_pkt - 1)))
1967             /*
1968              * Packets 0..n-1 cannot be checked for look-aheads
1969              * since we are fetching a bundle; the last packet,
1970              * however, can have its lookahead used.
1971              */
1972             packet->info.rx.rx_flags |=
1973                 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1974 
1975         /* NOTE: 1 HTC packet per scatter entry */
1976         scat_req->scat_list[i].buf = packet->buf;
1977         scat_req->scat_list[i].len = pad_len;
1978 
1979         packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
1980 
1981         list_add_tail(&packet->list, sync_compq);
1982 
1983         WARN_ON(!scat_req->scat_list[i].len);
1984         len += scat_req->scat_list[i].len;
1985     }
1986 
1987     scat_req->len = len;
1988     scat_req->scat_entries = i;
1989 
1990     status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
1991 
1992     if (!status)
1993         *n_pkt_fetched = i;
1994 
1995     /* free scatter request */
1996     hif_scatter_req_add(target->dev->ar, scat_req);
1997 
1998 fail_rx_pkt:
1999 
2000     return status;
2001 }
2002 
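/*
 * Post-process each fetched packet: strip its HTC header, mark all
 * but the last packet as having more to follow (the last one takes
 * its indication from the lookahead), and hand every packet to the
 * endpoint's rx callback.
 */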
2003 static int ath6kl_htc_rx_process_packets(struct htc_target *target,
2004                      struct list_head *comp_pktq,
2005                      u32 lk_ahds[],
2006                      int *n_lk_ahd)
2007 {
2008     struct htc_packet *packet, *tmp_pkt;
2009     struct htc_endpoint *ep;
2010     int status = 0;
2011 
2012     list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
2013         ep = &target->endpoint[packet->endpoint];
2014 
2015         trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2016                     packet->buf, packet->act_len);
2017 
2018         /* process the header of each received packet */
2019         status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
2020                            n_lk_ahd);
2021         if (status)
2022             return status;
2023 
2024         list_del(&packet->list);
2025 
2026         if (list_empty(comp_pktq)) {
2027             /*
2028              * The last packet's "more packets" flag is set
2029              * based on the lookahead.
2030              */
2031             if (*n_lk_ahd > 0)
2032                 ath6kl_htc_rx_set_indicate(lk_ahds[0],
2033                                ep, packet);
2034         } else
2035             /*
2036              * Packets in a bundle automatically have
2037              * this flag set.
2038              */
2039             packet->info.rx.indicat_flags |=
2040                 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
2041 
2042         ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
2043 
2044         if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
2045             ep->ep_st.rx_bundl += 1;
2046 
2047         ath6kl_htc_rx_complete(ep, packet);
2048     }
2049 
2050     return status;
2051 }
2052 
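/*
 * Drain rx_pktq either through bundled scatter fetches (when rx
 * bundling is enabled and more than one packet is queued) or one
 * fully synchronous fetch at a time, moving completed packets onto
 * comp_pktq.
 */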
2053 static int ath6kl_htc_rx_fetch(struct htc_target *target,
2054                    struct list_head *rx_pktq,
2055                    struct list_head *comp_pktq)
2056 {
2057     int fetched_pkts;
2058     bool part_bundle = false;
2059     int status = 0;
2060     struct list_head tmp_rxq;
2061     struct htc_packet *packet, *tmp_pkt;
2062 
2063     /* now go fetch the list of HTC packets */
2064     while (!list_empty(rx_pktq)) {
2065         fetched_pkts = 0;
2066 
2067         INIT_LIST_HEAD(&tmp_rxq);
2068 
2069         if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
2070             /*
2071              * There are enough packets to attempt a
2072              * bundle transfer and recv bundling is
2073              * allowed.
2074              */
2075             status = ath6kl_htc_rx_bundle(target, rx_pktq,
2076                               &tmp_rxq,
2077                               &fetched_pkts,
2078                               part_bundle);
2079             if (status)
2080                 goto fail_rx;
2081 
2082             if (!list_empty(rx_pktq))
2083                 part_bundle = true;
2084 
2085             list_splice_tail_init(&tmp_rxq, comp_pktq);
2086         }
2087 
2088         if (!fetched_pkts) {
2089             packet = list_first_entry(rx_pktq, struct htc_packet,
2090                            list);
2091 
2092             /* fully synchronous */
2093             packet->completion = NULL;
2094 
2095             if (!list_is_singular(rx_pktq))
2096                 /*
2097                  * look_aheads in all packets
2098                  * except the last one in the
2099                  * bundle must be ignored
2100                  */
2101                 packet->info.rx.rx_flags |=
2102                     HTC_RX_PKT_IGNORE_LOOKAHEAD;
2103 
2104             /* go fetch the packet */
2105             status = ath6kl_htc_rx_packet(target, packet,
2106                               packet->act_len);
2107 
2108             list_move_tail(&packet->list, &tmp_rxq);
2109 
2110             if (status)
2111                 goto fail_rx;
2112 
2113             list_splice_tail_init(&tmp_rxq, comp_pktq);
2114         }
2115     }
2116 
2117     return 0;
2118 
2119 fail_rx:
2120 
2121     /*
2122      * Clean up any packets we allocated but did not actually
2123      * use to fetch data.
2124      */
2125 
2126     list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
2127         list_del(&packet->list);
2128         htc_reclaim_rxbuf(target, packet,
2129                   &target->endpoint[packet->endpoint]);
2130     }
2131 
2132     list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
2133         list_del(&packet->list);
2134         htc_reclaim_rxbuf(target, packet,
2135                   &target->endpoint[packet->endpoint]);
2136     }
2137 
2138     return status;
2139 }
2140 
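/*
 * Top-level receive loop, driven by lookaheads: each pass allocates
 * rx packets for the messages announced by the current lookahead(s),
 * fetches and processes them, and keeps looping for as long as
 * trailer processing yields a fresh lookahead.
 */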
2141 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2142                      u32 msg_look_ahead, int *num_pkts)
2143 {
2144     struct htc_packet *packets, *tmp_pkt;
2145     struct htc_endpoint *endpoint;
2146     struct list_head rx_pktq, comp_pktq;
2147     int status = 0;
2148     u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2149     int num_look_ahead = 1;
2150     enum htc_endpoint_id id;
2151     int n_fetched = 0;
2152 
2153     INIT_LIST_HEAD(&comp_pktq);
2154     *num_pkts = 0;
2155 
2156     /*
2157      * On first entry, copy the look_aheads into our temp array
2158      * for processing.
2159      */
2160     look_aheads[0] = msg_look_ahead;
2161 
2162     while (true) {
2163         /*
2164          * First lookahead sets the expected endpoint IDs for all
2165          * packets in a bundle.
2166          */
2167         id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2168 
2169         if (id >= ENDPOINT_MAX) {
2170             ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2171                    id);
2172             status = -ENOMEM;
2173             break;
2174         }
2175         endpoint = &target->endpoint[id];
2176 
2177         INIT_LIST_HEAD(&rx_pktq);
2178         INIT_LIST_HEAD(&comp_pktq);
2179 
2180         /*
2181          * Try to allocate as many HTC RX packets as indicated by
2182          * the look_aheads.
2183          */
2184         status = ath6kl_htc_rx_alloc(target, look_aheads,
2185                          num_look_ahead, endpoint,
2186                          &rx_pktq);
2187         if (status)
2188             break;
2189 
2190         if (get_queue_depth(&rx_pktq) >= 2)
2191             /*
2192              * A recv bundle was detected; force a re-check of
2193              * the IRQ status
2194              */
2195             target->chk_irq_status_cnt = 1;
2196 
2197         n_fetched += get_queue_depth(&rx_pktq);
2198 
2199         num_look_ahead = 0;
2200 
2201         status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
2202 
2203         if (!status)
2204             ath6kl_htc_rx_chk_water_mark(endpoint);
2205 
2206         /* Process fetched packets */
2207         status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2208                                look_aheads,
2209                                &num_look_ahead);
2210 
2211         if (!num_look_ahead || status)
2212             break;
2213 
2214         /*
2215          * For SYNCH processing, if we get here, we are running
2216          * through the loop again due to a detected lookahead. Set
2217          * a flag that we should re-check IRQ status registers
2218          * before leaving IRQ processing; this can net better
2219          * performance in high-throughput situations.
2220          */
2221         target->chk_irq_status_cnt = 1;
2222     }
2223 
2224     if (status) {
2225         if (status != -ECANCELED)
2226             ath6kl_err("failed to get pending recv messages: %d\n",
2227                    status);
2228 
2229         /* cleanup any packets in sync completion queue */
2230         list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2231             list_del(&packets->list);
2232             htc_reclaim_rxbuf(target, packets,
2233                       &target->endpoint[packets->endpoint]);
2234         }
2235 
2236         if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2237             ath6kl_warn("host is going to stop; blocking receiver for htc_stop\n");
2238             ath6kl_hif_rx_control(target->dev, false);
2239         }
2240     }
2241 
2242     /*
2243      * Before leaving, check to see if host ran out of buffers and
2244      * needs to stop the receiver.
2245      */
2246     if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2247         ath6kl_warn("host has no rx buffers; blocking receiver to prevent overrun\n");
2248         ath6kl_hif_rx_control(target->dev, false);
2249     }
2250     *num_pkts = n_fetched;
2251 
2252     return status;
2253 }
2254 
2255 /*
2256  * Synchronously wait for a control message from the target.
2257  * This function is used at initialization time ONLY. At init,
2258  * messages on ENDPOINT 0 are expected.
2259  */
2260 static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
2261 {
2262     struct htc_packet *packet = NULL;
2263     struct htc_frame_look_ahead look_ahead;
2264 
2265     if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word,
2266                        HTC_TARGET_RESPONSE_TIMEOUT))
2267         return NULL;
2268 
2269     ath6kl_dbg(ATH6KL_DBG_HTC,
2270            "htc rx wait ctrl look_ahead 0x%X\n", look_ahead.word);
2271 
2272     if (look_ahead.eid != ENDPOINT_0)
2273         return NULL;
2274 
2275     packet = htc_get_control_buf(target, false);
2276 
2277     if (!packet)
2278         return NULL;
2279 
2280     packet->info.rx.rx_flags = 0;
2281     packet->info.rx.exp_hdr = look_ahead.word;
2282     packet->act_len = le16_to_cpu(look_ahead.payld_len) + HTC_HDR_LENGTH;
2283 
2284     if (packet->act_len > packet->buf_len)
2285         goto fail_ctrl_rx;
2286 
2287     /* we want synchronous operation */
2288     packet->completion = NULL;
2289 
2290     /* get the message from the device; this will block */
2291     if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
2292         goto fail_ctrl_rx;
2293 
2294     trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2295                 packet->buf, packet->act_len);
2296 
2297     /* process receive header */
2298     packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
2299 
2300     if (packet->status) {
2301         ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
2302                packet->status);
2303         goto fail_ctrl_rx;
2304     }
2305 
2306     return packet;
2307 
2308 fail_ctrl_rx:
2309     if (packet != NULL) {
2310         htc_rxpkt_reset(packet);
2311         reclaim_rx_ctrl_buf(target, packet);
2312     }
2313 
2314     return NULL;
2315 }
2316 
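/*
 * Queue caller-supplied receive buffers onto an endpoint's rx_bufq
 * and, if the receiver was blocked waiting for buffers on that
 * endpoint, re-enable it (unless HTC is stopping).
 */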
2317 static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
2318                   struct list_head *pkt_queue)
2319 {
2320     struct htc_endpoint *endpoint;
2321     struct htc_packet *first_pkt;
2322     bool rx_unblock = false;
2323     int status = 0, depth;
2324 
2325     if (list_empty(pkt_queue))
2326         return -ENOMEM;
2327 
2328     first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
2329 
2330     if (first_pkt->endpoint >= ENDPOINT_MAX)
2331         return status;
2332 
2333     depth = get_queue_depth(pkt_queue);
2334 
2335     ath6kl_dbg(ATH6KL_DBG_HTC,
2336            "htc rx add multiple ep id %d cnt %d len %d\n",
2337         first_pkt->endpoint, depth, first_pkt->buf_len);
2338 
2339     endpoint = &target->endpoint[first_pkt->endpoint];
2340 
2341     if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2342         struct htc_packet *packet, *tmp_pkt;
2343 
2344         /* walk through queue and mark each one canceled */
2345         list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
2346             packet->status = -ECANCELED;
2347             list_del(&packet->list);
2348             ath6kl_htc_rx_complete(endpoint, packet);
2349         }
2350 
2351         return status;
2352     }
2353 
2354     spin_lock_bh(&target->rx_lock);
2355 
2356     list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
2357 
2358     /* check if we are blocked waiting for a new buffer */
2359     if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2360         if (target->ep_waiting == first_pkt->endpoint) {
2361             ath6kl_dbg(ATH6KL_DBG_HTC,
2362                    "htc rx blocked on ep %d, unblocking\n",
2363                    target->ep_waiting);
2364             target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
2365             target->ep_waiting = ENDPOINT_MAX;
2366             rx_unblock = true;
2367         }
2368     }
2369 
2370     spin_unlock_bh(&target->rx_lock);
2371 
2372     if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
2373         /* TODO: implement a buffer threshold count? */
2374         ath6kl_hif_rx_control(target->dev, true);
2375 
2376     return status;
2377 }
2378 
2379 static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
2380 {
2381     struct htc_endpoint *endpoint;
2382     struct htc_packet *packet, *tmp_pkt;
2383     int i;
2384 
2385     for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2386         endpoint = &target->endpoint[i];
2387         if (!endpoint->svc_id)
2388             /* not in use */
2389             continue;
2390 
2391         spin_lock_bh(&target->rx_lock);
2392         list_for_each_entry_safe(packet, tmp_pkt,
2393                      &endpoint->rx_bufq, list) {
2394             list_del(&packet->list);
2395             spin_unlock_bh(&target->rx_lock);
2396             ath6kl_dbg(ATH6KL_DBG_HTC,
2397                    "htc rx flush pkt 0x%p  len %d  ep %d\n",
2398                    packet, packet->buf_len,
2399                    packet->endpoint);
2400             /*
2401              * Packets in the rx_bufq of endpoint 0 were originally
2402              * queued from target->free_ctrl_rxbuf, where packet and
2403              * packet->buf_start are allocated separately using
2404              * kmalloc(). For the rx_bufq of any other endpoint, the
2405              * buffer is allocated as an skb, with packet stored in
2406              * skb->head. Take care of this difference when freeing
2407              * the memory.
2408              */
2409             if (packet->endpoint == ENDPOINT_0) {
2410                 kfree(packet->buf_start);
2411                 kfree(packet);
2412             } else {
2413                 dev_kfree_skb(packet->pkt_cntxt);
2414             }
2415             spin_lock_bh(&target->rx_lock);
2416         }
2417         spin_unlock_bh(&target->rx_lock);
2418     }
2419 }
2420 
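/*
 * Connect a service. Except for the pseudo control service, this is
 * a synchronous exchange on endpoint 0; a sketch of the handshake:
 *
 *   host   -> target : htc_conn_service_msg  (svc_id, conn_flags)
 *   target -> host   : htc_conn_service_resp (status, eid, max_msg_sz)
 *
 * The assigned endpoint is then initialized from the response.
 */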
2421 static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
2422                 struct htc_service_connect_req *conn_req,
2423                 struct htc_service_connect_resp *conn_resp)
2424 {
2425     struct htc_packet *rx_pkt = NULL;
2426     struct htc_packet *tx_pkt = NULL;
2427     struct htc_conn_service_resp *resp_msg;
2428     struct htc_conn_service_msg *conn_msg;
2429     struct htc_endpoint *endpoint;
2430     enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
2431     unsigned int max_msg_sz = 0;
2432     int status = 0;
2433     u16 msg_id;
2434 
2435     ath6kl_dbg(ATH6KL_DBG_HTC,
2436            "htc connect service target 0x%p service id 0x%x\n",
2437            target, conn_req->svc_id);
2438 
2439     if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
2440         /* special case for pseudo control service */
2441         assigned_ep = ENDPOINT_0;
2442         max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
2443     } else {
2444         /* allocate a packet to send to the target */
2445         tx_pkt = htc_get_control_buf(target, true);
2446 
2447         if (!tx_pkt)
2448             return -ENOMEM;
2449 
2450         conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
2451         memset(conn_msg, 0, sizeof(*conn_msg));
2452         conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
2453         conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
2454         conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
2455 
2456         set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
2457                  sizeof(*conn_msg) + conn_msg->svc_meta_len,
2458                  ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
2459 
2460         /* we want synchronous operation */
2461         tx_pkt->completion = NULL;
2462         ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
2463         status = ath6kl_htc_tx_issue(target, tx_pkt);
2464 
2465         if (status)
2466             goto fail_tx;
2467 
2468         /* wait for response */
2469         rx_pkt = htc_wait_for_ctrl_msg(target);
2470 
2471         if (!rx_pkt) {
2472             status = -ENOMEM;
2473             goto fail_tx;
2474         }
2475 
2476         resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
2477         msg_id = le16_to_cpu(resp_msg->msg_id);
2478 
2479         if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
2480             (rx_pkt->act_len < sizeof(*resp_msg))) {
2481             status = -ENOMEM;
2482             goto fail_tx;
2483         }
2484 
2485         conn_resp->resp_code = resp_msg->status;
2486         /* check response status */
2487         if (resp_msg->status != HTC_SERVICE_SUCCESS) {
2488             ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2489                    resp_msg->svc_id, resp_msg->status);
2490             status = -ENOMEM;
2491             goto fail_tx;
2492         }
2493 
2494         assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
2495         max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2496     }
2497 
2498     if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
2499              assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
2500         status = -ENOMEM;
2501         goto fail_tx;
2502     }
2503 
2504     endpoint = &target->endpoint[assigned_ep];
2505     endpoint->eid = assigned_ep;
2506     if (endpoint->svc_id) {
2507         status = -ENOMEM;
2508         goto fail_tx;
2509     }
2510 
2511     /* return assigned endpoint to caller */
2512     conn_resp->endpoint = assigned_ep;
2513     conn_resp->len_max = max_msg_sz;
2514 
2515     /* setup the endpoint */
2516 
2517     /* this marks the endpoint in use */
2518     endpoint->svc_id = conn_req->svc_id;
2519 
2520     endpoint->max_txq_depth = conn_req->max_txq_depth;
2521     endpoint->len_max = max_msg_sz;
2522     endpoint->ep_cb = conn_req->ep_cb;
2523     endpoint->cred_dist.svc_id = conn_req->svc_id;
2524     endpoint->cred_dist.htc_ep = endpoint;
2525     endpoint->cred_dist.endpoint = assigned_ep;
2526     endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2527 
2528     switch (endpoint->svc_id) {
2529     case WMI_DATA_BK_SVC:
2530         endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
2531         break;
2532     default:
2533         endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
2534         break;
2535     }
2536 
2537     if (conn_req->max_rxmsg_sz) {
2538         /*
2539          * Override the cred_per_msg calculation; this optimizes
2540          * the credit-low indications, since the host will actually
2541          * issue smaller messages in the send path.
2542          */
2543         if (conn_req->max_rxmsg_sz > max_msg_sz) {
2544             status = -ENOMEM;
2545             goto fail_tx;
2546         }
2547         endpoint->cred_dist.cred_per_msg =
2548             conn_req->max_rxmsg_sz / target->tgt_cred_sz;
2549     } else
2550         endpoint->cred_dist.cred_per_msg =
2551             max_msg_sz / target->tgt_cred_sz;
2552 
2553     if (!endpoint->cred_dist.cred_per_msg)
2554         endpoint->cred_dist.cred_per_msg = 1;
2555 
2556     /* save local connection flags */
2557     endpoint->conn_flags = conn_req->flags;
2558 
2559 fail_tx:
2560     if (tx_pkt)
2561         htc_reclaim_txctrl_buf(target, tx_pkt);
2562 
2563     if (rx_pkt) {
2564         htc_rxpkt_reset(rx_pkt);
2565         reclaim_rx_ctrl_buf(target, rx_pkt);
2566     }
2567 
2568     return status;
2569 }
2570 
2571 static void reset_ep_state(struct htc_target *target)
2572 {
2573     struct htc_endpoint *endpoint;
2574     int i;
2575 
2576     for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2577         endpoint = &target->endpoint[i];
2578         memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2579         endpoint->svc_id = 0;
2580         endpoint->len_max = 0;
2581         endpoint->max_txq_depth = 0;
2582         memset(&endpoint->ep_st, 0,
2583                sizeof(endpoint->ep_st));
2584         INIT_LIST_HEAD(&endpoint->rx_bufq);
2585         INIT_LIST_HEAD(&endpoint->txq);
2586         endpoint->target = target;
2587     }
2588 
2589     /* reset distribution list */
2590     /* FIXME: free existing entries */
2591     INIT_LIST_HEAD(&target->cred_dist_list);
2592 }
2593 
2594 static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
2595                  enum htc_endpoint_id endpoint)
2596 {
2597     int num;
2598 
2599     spin_lock_bh(&target->rx_lock);
2600     num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2601     spin_unlock_bh(&target->rx_lock);
2602     return num;
2603 }
2604 
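/*
 * Size the tx/rx bundling parameters from the HIF scatter limits.
 * As a hypothetical example of the alignment check at the end: with
 * tgt_cred_sz = 256 and block_sz = 128, 256 % 128 == 0 and send
 * bundling stays enabled; with block_sz = 96 it would be disabled.
 */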
2605 static void htc_setup_msg_bndl(struct htc_target *target)
2606 {
2607     /* limit what HTC can handle */
2608     target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2609                        target->msg_per_bndl_max);
2610 
2611     if (ath6kl_hif_enable_scatter(target->dev->ar)) {
2612         target->msg_per_bndl_max = 0;
2613         return;
2614     }
2615 
2616     /* limit the bundle to what the device layer can handle */
2617     target->msg_per_bndl_max = min(target->max_scat_entries,
2618                        target->msg_per_bndl_max);
2619 
2620     ath6kl_dbg(ATH6KL_DBG_BOOT,
2621            "htc bundling allowed msg_per_bndl_max %d\n",
2622            target->msg_per_bndl_max);
2623 
2624     /* Max rx bundle size is limited by the max transfer size per scatter request */
2625     target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
2626     /* Max tx bundle size is limited by the extended mbox address range */
2627     target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2628                      target->max_xfer_szper_scatreq);
2629 
2630     ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
2631            target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2632 
2633     if (target->max_tx_bndl_sz)
2634         /* tx_bndl_mask has one enable bit per AC */
2635         target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
2636 
2637     if (target->max_rx_bndl_sz)
2638         target->rx_bndl_enable = true;
2639 
2640     if ((target->tgt_cred_sz % target->block_sz) != 0) {
2641         ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2642                 target->tgt_cred_sz);
2643 
2644         /*
2645          * Disallow send bundling: since the credit size is
2646          * not aligned to a block size, the I/O block
2647          * padding will spill into the next credit buffer,
2648          * which is fatal.
2649          */
2650         target->tx_bndl_mask = 0;
2651     }
2652 }
2653 
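/*
 * Wait for the target's HTC_MSG_READY_ID message, extract the credit
 * count and size (and, for HTC >= 2.1, the bundling limits) from it,
 * then connect the pseudo control service on endpoint 0.
 */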
2654 static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
2655 {
2656     struct htc_packet *packet = NULL;
2657     struct htc_ready_ext_msg *rdy_msg;
2658     struct htc_service_connect_req connect;
2659     struct htc_service_connect_resp resp;
2660     int status;
2661 
2662     /* we should be getting 1 control message that the target is ready */
2663     packet = htc_wait_for_ctrl_msg(target);
2664 
2665     if (!packet)
2666         return -ENOMEM;
2667 
2668     /* we controlled the buffer creation so it's properly aligned */
2669     rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
2670 
2671     if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
2672         (packet->act_len < sizeof(struct htc_ready_msg))) {
2673         status = -ENOMEM;
2674         goto fail_wait_target;
2675     }
2676 
2677     if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
2678         status = -ENOMEM;
2679         goto fail_wait_target;
2680     }
2681 
2682     target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2683     target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2684 
2685     ath6kl_dbg(ATH6KL_DBG_BOOT,
2686            "htc target ready credits %d size %d\n",
2687            target->tgt_creds, target->tgt_cred_sz);
2688 
2689     /* check if this is an extended ready message */
2690     if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
2691         /* this is an extended message */
2692         target->htc_tgt_ver = rdy_msg->htc_ver;
2693         target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
2694     } else {
2695         /* legacy */
2696         target->htc_tgt_ver = HTC_VERSION_2P0;
2697         target->msg_per_bndl_max = 0;
2698     }
2699 
2700     ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
2701            (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2702            target->htc_tgt_ver);
2703 
2704     if (target->msg_per_bndl_max > 0)
2705         htc_setup_msg_bndl(target);
2706 
2707     /* setup our pseudo HTC control endpoint connection */
2708     memset(&connect, 0, sizeof(connect));
2709     memset(&resp, 0, sizeof(resp));
2710     connect.ep_cb.rx = htc_ctrl_rx;
2711     connect.ep_cb.rx_refill = NULL;
2712     connect.ep_cb.tx_full = NULL;
2713     connect.max_txq_depth = NUM_CONTROL_BUFFERS;
2714     connect.svc_id = HTC_CTRL_RSVD_SVC;
2715 
2716     /* connect fake service */
2717     status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
2718 
2719     if (status)
2720         /*
2721          * FIXME: this call doesn't make sense; the caller should
2722          * call ath6kl_htc_mbox_cleanup() when it wants to remove HTC
2723          */
2724         ath6kl_hif_cleanup_scatter(target->dev->ar);
2725 
2726 fail_wait_target:
2727     if (packet) {
2728         htc_rxpkt_reset(packet);
2729         reclaim_rx_ctrl_buf(target, packet);
2730     }
2731 
2732     return status;
2733 }
2734 
2735 /*
2736  * Start HTC, enable interrupts and let the target know
2737  * host has finished setup.
2738  */
2739 static int ath6kl_htc_mbox_start(struct htc_target *target)
2740 {
2741     struct htc_packet *packet;
2742     int status;
2743 
2744     memset(&target->dev->irq_proc_reg, 0,
2745            sizeof(target->dev->irq_proc_reg));
2746 
2747     /* Disable interrupts at the chip level */
2748     ath6kl_hif_disable_intrs(target->dev);
2749 
2750     target->htc_flags = 0;
2751     target->rx_st_flags = 0;
2752 
2753     /* Push control receive buffers into htc control endpoint */
2754     while ((packet = htc_get_control_buf(target, false)) != NULL) {
2755         status = htc_add_rxbuf(target, packet);
2756         if (status)
2757             return status;
2758     }
2759 
2760     /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2761     ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
2762                target->tgt_creds);
2763 
2764     dump_cred_dist_stats(target);
2765 
2766     /* Indicate setup completion to the target */
2767     status = htc_setup_tx_complete(target);
2768 
2769     if (status)
2770         return status;
2771 
2772     /* unmask interrupts */
2773     status = ath6kl_hif_unmask_intrs(target->dev);
2774 
2775     if (status)
2776         ath6kl_htc_mbox_stop(target);
2777 
2778     return status;
2779 }
2780 
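/*
 * Rebuild the control buffer pools. Each buffer is sized to hold the
 * larger of a block or a max control message, plus the HTC header;
 * the first NUM_CONTROL_RX_BUFFERS packets seed the rx free list and
 * the remainder seed the tx free list.
 */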
2781 static int ath6kl_htc_reset(struct htc_target *target)
2782 {
2783     u32 block_size, ctrl_bufsz;
2784     struct htc_packet *packet;
2785     int i;
2786 
2787     reset_ep_state(target);
2788 
2789     block_size = target->dev->ar->mbox_info.block_size;
2790 
2791     ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2792               (block_size + HTC_HDR_LENGTH) :
2793               (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2794 
2795     for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2796         packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2797         if (!packet)
2798             return -ENOMEM;
2799 
2800         packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2801         if (!packet->buf_start) {
2802             kfree(packet);
2803             return -ENOMEM;
2804         }
2805 
2806         packet->buf_len = ctrl_bufsz;
2807         if (i < NUM_CONTROL_RX_BUFFERS) {
2808             packet->act_len = 0;
2809             packet->buf = packet->buf_start;
2810             packet->endpoint = ENDPOINT_0;
2811             list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2812         } else {
2813             list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2814         }
2815     }
2816 
2817     return 0;
2818 }
2819 
2820 /* htc_stop: stop interrupt reception, and flush all queued buffers */
2821 static void ath6kl_htc_mbox_stop(struct htc_target *target)
2822 {
2823     spin_lock_bh(&target->htc_lock);
2824     target->htc_flags |= HTC_OP_STATE_STOPPING;
2825     spin_unlock_bh(&target->htc_lock);
2826 
2827     /*
2828      * Masking interrupts is a synchronous operation; when this
2829      * function returns, all pending HIF I/O has completed and we
2830      * can safely flush the queues.
2831      */
2832     ath6kl_hif_mask_intrs(target->dev);
2833 
2834     ath6kl_htc_flush_txep_all(target);
2835 
2836     ath6kl_htc_mbox_flush_rx_buf(target);
2837 
2838     ath6kl_htc_reset(target);
2839 }
2840 
2841 static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
2842 {
2843     struct htc_target *target = NULL;
2844     int status = 0;
2845 
2846     target = kzalloc(sizeof(*target), GFP_KERNEL);
2847     if (!target) {
2848         ath6kl_err("unable to allocate memory\n");
2849         return NULL;
2850     }
2851 
2852     target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2853     if (!target->dev) {
2854         ath6kl_err("unable to allocate memory\n");
2855         kfree(target);
2856         return NULL;
2857     }
2858 
2859     spin_lock_init(&target->htc_lock);
2860     spin_lock_init(&target->rx_lock);
2861     spin_lock_init(&target->tx_lock);
2862 
2863     INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2864     INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2865     INIT_LIST_HEAD(&target->cred_dist_list);
2866 
2867     target->dev->ar = ar;
2868     target->dev->htc_cnxt = target;
2869     target->ep_waiting = ENDPOINT_MAX;
2870 
2871     status = ath6kl_hif_setup(target->dev);
2872     if (status)
2873         goto err_htc_cleanup;
2874 
2875     status = ath6kl_htc_reset(target);
2876     if (status)
2877         goto err_htc_cleanup;
2878 
2879     return target;
2880 
2881 err_htc_cleanup:
2882     ath6kl_htc_mbox_cleanup(target);
2883 
2884     return NULL;
2885 }
2886 
2887 /* cleanup the HTC instance */
2888 static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
2889 {
2890     struct htc_packet *packet, *tmp_packet;
2891 
2892     ath6kl_hif_cleanup_scatter(target->dev->ar);
2893 
2894     list_for_each_entry_safe(packet, tmp_packet,
2895                  &target->free_ctrl_txbuf, list) {
2896         list_del(&packet->list);
2897         kfree(packet->buf_start);
2898         kfree(packet);
2899     }
2900 
2901     list_for_each_entry_safe(packet, tmp_packet,
2902                  &target->free_ctrl_rxbuf, list) {
2903         list_del(&packet->list);
2904         kfree(packet->buf_start);
2905         kfree(packet);
2906     }
2907 
2908     kfree(target->dev);
2909     kfree(target);
2910 }
2911 
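/*
 * HTC ops for the mbox-based transport (assumed to be the SDIO HIF);
 * ath6kl_htc_mbox_attach() installs them on the ath6kl instance so
 * that callers can stay transport-agnostic.
 */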
2912 static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
2913     .create = ath6kl_htc_mbox_create,
2914     .wait_target = ath6kl_htc_mbox_wait_target,
2915     .start = ath6kl_htc_mbox_start,
2916     .conn_service = ath6kl_htc_mbox_conn_service,
2917     .tx = ath6kl_htc_mbox_tx,
2918     .stop = ath6kl_htc_mbox_stop,
2919     .cleanup = ath6kl_htc_mbox_cleanup,
2920     .flush_txep = ath6kl_htc_mbox_flush_txep,
2921     .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
2922     .activity_changed = ath6kl_htc_mbox_activity_changed,
2923     .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
2924     .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
2925     .credit_setup = ath6kl_htc_mbox_credit_setup,
2926 };
2927 
2928 void ath6kl_htc_mbox_attach(struct ath6kl *ar)
2929 {
2930     ar->htc_ops = &ath6kl_htc_mbox_ops;
2931 }