// SPDX-License-Identifier: GPL-2.0-only
/*
 * Queue between the tx operation and the bh workqueue.
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"
#include "traces.h"

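/* While wdev->tx_lock is non-zero, wfx_tx_queues_get() returns NULL and no new frame is
 * sent to the chip. When the last holder releases the lock, the bottom half is kicked
 * to resume transmission.
 */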
void wfx_tx_lock(struct wfx_dev *wdev)
{
	atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
	int tx_lock = atomic_dec_return(&wdev->tx_lock);

	WARN(tx_lock < 0, "inconsistent tx_lock value");
	if (!tx_lock)
		wfx_bh_request_tx(wdev);
}

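/* Wait (at most 3 seconds) for the chip to empty its tx buffers. On timeout, dump the
 * frames stuck in the firmware and declare the chip frozen.
 */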
void wfx_tx_flush(struct wfx_dev *wdev)
{
	int ret;

	/* Do not wait for any reply if chip is frozen */
	if (wdev->chip_frozen)
		return;

	wfx_tx_lock(wdev);
	mutex_lock(&wdev->hif_cmd.lock);
	ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used,
				 msecs_to_jiffies(3000));
	if (!ret) {
		dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
			 wdev->hif.tx_buffers_used);
		wfx_pending_dump_old_frames(wdev, 3000);
		/* FIXME: drop pending frames here */
		wdev->chip_frozen = true;
	}
	mutex_unlock(&wdev->hif_cmd.lock);
	wfx_tx_unlock(wdev);
}

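/* Same as wfx_tx_flush(), but the tx lock is kept held on return. The caller is
 * expected to release it with wfx_tx_unlock().
 */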
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}

void wfx_tx_queues_init(struct wfx_vif *wvif)
{
	/* The device is in charge of respecting the details of the QoS parameters. The
	 * driver just ensures that it roughly respects the priorities to avoid any
	 * shortage.
	 */
	const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		skb_queue_head_init(&wvif->tx_queue[i].normal);
		skb_queue_head_init(&wvif->tx_queue[i].cab);
		wvif->tx_queue[i].priority = priorities[i];
	}
}

bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
{
	return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab);
}

void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
		WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
	}
}

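/* Unlink every frame of @skb_queue and prepend it to @dropped, under the queue lock. */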
static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
				struct sk_buff_head *skb_queue, struct sk_buff_head *dropped)
{
	struct sk_buff *skb, *tmp;

	spin_lock_bh(&skb_queue->lock);
	skb_queue_walk_safe(skb_queue, skb, tmp) {
		__skb_unlink(skb, skb_queue);
		skb_queue_head(dropped, skb);
	}
	spin_unlock_bh(&skb_queue->lock);
}

void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
		       struct sk_buff_head *dropped)
{
	__wfx_tx_queue_drop(wvif, &queue->cab, dropped);
	__wfx_tx_queue_drop(wvif, &queue->normal, dropped);
	wake_up(&wvif->wdev->tx_dequeue);
}

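/* Queue a frame received from mac80211, either on the "content after (DTIM) beacon"
 * queue or on the normal queue of the access category chosen by mac80211.
 */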
void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
{
	struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		skb_queue_tail(&queue->cab, skb);
	else
		skb_queue_tail(&queue->normal, skb);
}

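/* Move every in-flight frame to @dropped and fix up the pending_frames counters. Only
 * meant to recover a frozen device.
 */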
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
	struct wfx_queue *queue;
	struct wfx_vif *wvif;
	struct wfx_hif_msg *hif;
	struct sk_buff *skb;

	WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__);
	while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
		hif = (struct wfx_hif_msg *)skb->data;
		wvif = wdev_to_wvif(wdev, hif->interface);
		if (wvif) {
			queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
			WARN_ON(skb_get_queue_mapping(skb) > 3);
			WARN_ON(!atomic_read(&queue->pending_frames));
			atomic_dec(&queue->pending_frames);
		}
		skb_queue_head(dropped, skb);
	}
}

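/* Find the in-flight frame matching @packet_id, unlink it from the pending queue and
 * return it (typically on reception of the corresponding tx confirmation).
 */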
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
	struct wfx_queue *queue;
	struct wfx_hif_req_tx *req;
	struct wfx_vif *wvif;
	struct wfx_hif_msg *hif;
	struct sk_buff *skb;

	spin_lock_bh(&wdev->tx_pending.lock);
	skb_queue_walk(&wdev->tx_pending, skb) {
		hif = (struct wfx_hif_msg *)skb->data;
		req = (struct wfx_hif_req_tx *)hif->body;
		if (req->packet_id != packet_id)
			continue;
		spin_unlock_bh(&wdev->tx_pending.lock);
		wvif = wdev_to_wvif(wdev, hif->interface);
		if (wvif) {
			queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
			WARN_ON(skb_get_queue_mapping(skb) > 3);
			WARN_ON(!atomic_read(&queue->pending_frames));
			atomic_dec(&queue->pending_frames);
		}
		skb_unlink(skb, &wdev->tx_pending);
		return skb;
	}
	spin_unlock_bh(&wdev->tx_pending.lock);
	WARN(1, "cannot find packet in pending queue");
	return NULL;
}

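/* Log the identifier and age of every in-flight frame that the firmware has held for
 * more than @limit_ms milliseconds.
 */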
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv;
	struct wfx_hif_req_tx *req;
	struct sk_buff *skb;
	bool first = true;

	spin_lock_bh(&wdev->tx_pending.lock);
	skb_queue_walk(&wdev->tx_pending, skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		req = wfx_skb_txreq(skb);
		if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) {
			if (first) {
				dev_info(wdev->dev, "frames stuck in firmware for %ums or more:\n",
					 limit_ms);
				first = false;
			}
			dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
				 req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp));
		}
	}
	spin_unlock_bh(&wdev->tx_pending.lock);
}

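/* Return the time elapsed, in microseconds, since @skb was handed to the chip. */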
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb)
{
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

	return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

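/* Return true if a frame is waiting on one of the "content after (DTIM) beacon" queues.
 * Only an AP interface can hold such frames.
 */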
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
	struct ieee80211_vif *vif = wvif_to_vif(wvif);
	int i;

	if (vif->type != NL80211_IFTYPE_AP)
		return false;
	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
		/* Note: since only an AP can have mcast frames in queue and only one vif
		 * can be an AP, all queued frames have the same interface id
		 */
		if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
			return true;
	return false;
}

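/* A queue weighs more as it accumulates in-flight frames, scaled by its priority
 * factor. Queues are served in increasing weight order, so the low factors assigned by
 * wfx_tx_queues_init() to the high priority ACs let their weight grow more slowly.
 */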
static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
{
	return atomic_read(&queue->pending_frames) * queue->priority;
}

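/* Choose the next frame to send: sort the queues of all interfaces by weight (insertion
 * sort), serve the "content after DTIM beacon" queues first while the DTIM window is
 * open, then the normal queues in weight order.
 */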
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
	struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
	int i, j, num_queues = 0;
	struct wfx_vif *wvif;
	struct wfx_hif_msg *hif;
	struct sk_buff *skb;

	/* sort the queues */
	wvif = NULL;
	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			WARN_ON(num_queues >= ARRAY_SIZE(queues));
			queues[num_queues] = &wvif->tx_queue[i];
			for (j = num_queues; j > 0; j--)
				if (wfx_tx_queue_get_weight(queues[j]) <
				    wfx_tx_queue_get_weight(queues[j - 1]))
					swap(queues[j - 1], queues[j]);
			num_queues++;
		}
	}

	wvif = NULL;
	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
		if (!wvif->after_dtim_tx_allowed)
			continue;
		for (i = 0; i < num_queues; i++) {
			skb = skb_dequeue(&queues[i]->cab);
			if (!skb)
				continue;
			/* Note: since only an AP can have mcast frames in queue and only
			 * one vif can be an AP, all queued frames have the same
			 * interface id
			 */
			hif = (struct wfx_hif_msg *)skb->data;
			WARN_ON(hif->interface != wvif->id);
			WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);
			atomic_inc(&queues[i]->pending_frames);
			trace_queues_stats(wdev, queues[i]);
			return skb;
		}
		/* No more multicast to send */
		wvif->after_dtim_tx_allowed = false;
		schedule_work(&wvif->update_tim_work);
	}

	for (i = 0; i < num_queues; i++) {
		skb = skb_dequeue(&queues[i]->normal);
		if (skb) {
			atomic_inc(&queues[i]->pending_frames);
			trace_queues_stats(wdev, queues[i]);
			return skb;
		}
	}
	return NULL;
}

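/* Entry point used by the bottom half: unless tx is paused, pick the next frame, move
 * it to the in-flight queue, timestamp it and return the HIF message to send.
 */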
struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
	struct wfx_tx_priv *tx_priv;
	struct sk_buff *skb;

	if (atomic_read(&wdev->tx_lock))
		return NULL;
	skb = wfx_tx_queues_get_skb(wdev);
	if (!skb)
		return NULL;
	skb_queue_tail(&wdev->tx_pending, skb);
	wake_up(&wdev->tx_dequeue);
	tx_priv = wfx_skb_tx_priv(skb);
	tx_priv->xmit_timestamp = ktime_get();
	return (struct wfx_hif_msg *)skb->data;
}