// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};

static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

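/* A packet ID packs four 8-bit fields into a u32:
 * bits 31:24 queue generation, 23:16 queue id,
 * bits 15:8 item generation, 7:0 item index within the queue's pool.
 */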
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}

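/* Expire items whose TTL has elapsed: each expired item is duplicated onto
 * @head for deferred skb destruction (cw1200_queue_post_gc() runs without
 * the queue lock) and returned to the free pool.  While the queue stays
 * overfull, the gc timer is re-armed for the next expiry.
 */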
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		if (time_is_after_jiffies(item->queue_timestamp + queue->ttl))
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

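/* Drop every queued and pending frame.  Advancing the queue generation makes
 * any previously issued packet ID stale; skb destruction is deferred to
 * cw1200_queue_post_gc() so it runs without the queue and stats locks held.
 */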
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

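/* Enqueue an skb for transmission: take an item from the free pool, attach
 * the skb, stamp it with a fresh packet ID and the current jiffies, and stop
 * the mac80211 queue when the pool is close to exhaustion.  Returns -ENOENT
 * if no free item is available.
 */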
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

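/* Pick the first queued frame whose link ID is set in @link_id_map and hand
 * it to the caller for transmission; the item moves to the pending list and
 * its xmit timestamp is recorded.
 */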
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

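/* Put a pending frame, identified by @packet_id, back onto the TX queue.
 * The item generation is bumped so the previously issued packet ID can no
 * longer match.
 */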
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

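/* Return every pending frame to the TX queue, bumping each item's generation
 * and rebuilding its packet ID from the current queue generation.
 */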
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

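/* Release a pending frame identified by @packet_id: return its item to the
 * free pool, bump the sent counter, and run the skb destructor after the
 * queue lock has been dropped.
 */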
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

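/* Look up the skb and txpriv belonging to @packet_id without removing the
 * item from the queue.
 */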
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

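/* Scan the pending list and lower *timestamp to the oldest xmit timestamp
 * found, skipping the frame identified by @pending_frame_id.  Returns true
 * if any frames are pending.
 */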
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}