// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}
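
/*
 * Worked example (editor's note): with depth = 64 and round_robin off,
 * each CPU's hint starts at a random bit. If sbitmap_get() hands out
 * exactly the hinted bit (nr == hint), the hint advances to nr + 1,
 * wrapping to 0 near the end of the map; if the map was full (nr == -1),
 * the hint resets to 0 so the next attempt scans from the start. With
 * round_robin on, the hint advances past every allocated bit, not just
 * the hinted one.
 */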

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}
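
/*
 * Example (editor's illustrative sketch, not part of the original file,
 * guarded out so it never compiles): how the deferred-clear path is
 * driven. sbitmap_deferred_clear_bit() (from linux/sbitmap.h) only sets
 * the bit in ->cleared; the slot still looks busy in ->word until an
 * allocator finds the word full and sbitmap_deferred_clear() folds
 * ->cleared back in.
 */
#if 0
static void demo_deferred_release(struct sbitmap *sb, unsigned int bit)
{
	/* Cheap release: one set_bit() on ->cleared, no RMW on ->word. */
	sbitmap_deferred_clear_bit(sb, bit);

	/*
	 * The bit becomes allocatable again only after a later get (or a
	 * resize) hits the full word and batches the cleared mask back
	 * into ->word via xchg() + andnot, as above.
	 */
}
#endif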

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
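
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the minimal sbitmap lifecycle. demo_sbitmap_lifecycle is a hypothetical
 * name. Passing shift = -1 lets sbitmap_calculate_shift() pick
 * bits_per_word from the depth; alloc_hint = true enables the per-cpu
 * hints that sbitmap_get() requires.
 */
#if 0
static int demo_sbitmap_lifecycle(void)
{
	struct sbitmap sb;
	int nr, ret;

	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
				false, true);
	if (ret)
		return ret;

	nr = sbitmap_get(&sb);			/* -1 if no free bit */
	if (nr >= 0)
		sbitmap_deferred_clear_bit(&sb, nr);	/* lazy release */

	sbitmap_free(&sb);			/* frees map and hints */
	return 0;
}
#endif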

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
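
/*
 * Worked example (editor's note): with depth = 8, hint = 5 and wrap set,
 * the search order is bits 5..7, then 0..7 once after the hint is reset.
 * With wrap clear (round-robin callers pass !sb->round_robin), only bits
 * 5..7 are tried, so the caller moves on to the next word instead of
 * reusing low bits out of order.
 */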

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
					alloc_hint, !sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
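
/*
 * Example (editor's note on the two policies): with round_robin = false,
 * allocation starts from the per-cpu hint and the hint only advances when
 * the hinted bit itself was taken, so allocations cluster and stay
 * cache-friendly; with round_robin = true, the hint advances past every
 * allocated bit, spreading allocations in increasing order across the
 * whole depth for fairness.
 */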

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min_t(unsigned int,
					      __map_depth(sb, index),
					      shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
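
/*
 * Example (editor's illustrative sketch, hypothetical name): a shallow
 * get caps the search depth of each word, e.g. to its low 4 bits, leaving
 * headroom for other allocators. Assumes the sbitmap was initialized with
 * alloc_hint = true, as sbitmap_get_shallow() requires.
 */
#if 0
static int demo_get_shallow(struct sbitmap *sb)
{
	int nr = sbitmap_get_shallow(sb, 4);

	/* nr is -1, or a bit whose offset within its word is < 4 */
	return nr;
}
#endif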

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
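
/*
 * Worked example (editor's note): depth = 256, shift = 6
 * (bits_per_word = 64), min_shallow_depth = 16. Then shallow_depth =
 * min(64, 16) = 16, the usable depth becomes
 * (256 >> 6) * 16 + min(256 & 63, 16) = 4 * 16 + 0 = 64, and with
 * SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8 the result is
 * wake_batch = clamp(64 / 8, 1, 8) = 8.
 */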

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
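
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the minimal sbitmap_queue lifecycle, with a hypothetical demo_ name.
 * __sbitmap_queue_get() is the non-preemption-managed variant; freeing a
 * tag goes through sbitmap_queue_clear(), which also drives the batched
 * wakeups below.
 */
#if 0
static int demo_sbq_lifecycle(void)
{
	struct sbitmap_queue sbq;
	int tag, ret;

	ret = sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;

	tag = __sbitmap_queue_get(&sbq);	/* -1 if exhausted */
	if (tag >= 0)
		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());

	sbitmap_queue_free(&sbq);
	return 0;
}
#endif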

static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
						     unsigned int wake_batch)
{
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			min_batch, SBQ_WAKE_BATCH);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			int map_tags = min_t(int, nr_tags, map_depth);
			unsigned long val, ret;

			get_mask = ((1UL << map_tags) - 1) << nr;
			do {
				val = READ_ONCE(map->word);
				if ((val & ~get_mask) != val)
					goto next;
				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
			} while (ret != val);
			get_mask = (get_mask & ~ret) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + map_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}
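
/*
 * Example (editor's illustrative sketch, hypothetical name): consuming a
 * batch. The return value is a mask of up to nr_tags bits relative to
 * *offset; callers turn the set bits into tag numbers and later release
 * them together with sbitmap_queue_clear_batch(). Assumes a queue
 * initialized with round_robin = false, as the function requires.
 */
#if 0
static void demo_get_batch(struct sbitmap_queue *sbq)
{
	unsigned int offset;
	unsigned long mask = __sbitmap_queue_get_batch(sbq, 4, &offset);
	int tags[4], n = 0;

	while (mask) {
		int bit = __ffs(mask);

		tags[n++] = offset + bit;	/* absolute tag number */
		mask &= mask - 1;		/* clear lowest set bit */
	}
	if (n)
		sbitmap_queue_clear_batch(sbq, 0, tags, n);
}
#endif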

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
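
/*
 * Example (editor's illustrative sketch, hypothetical name): the required
 * pairing. A user that will ever pass a shallow depth must declare the
 * minimum it will use up front, so wake_batch is recalculated small
 * enough for wakeups to keep making progress; otherwise the WARN_ON_ONCE
 * above fires.
 */
#if 0
static void demo_shallow_setup(struct sbitmap_queue *sbq)
{
	sbitmap_queue_min_shallow_depth(sbq, 4);
	/* ... later, on the allocation path: */
	(void)sbitmap_queue_get_shallow(sbq, 4);
}
#endif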

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * Of concurrent callers, the one that loses the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
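
/*
 * Worked example (editor's note): with wake_batch = 8, each freed bit
 * decrements the current wait queue's wait_cnt, so every 8th call to
 * sbitmap_queue_wake_up() drives one batched wakeup: the winner of the
 * atomic_cmpxchg() resets wait_cnt to 8, advances wake_index to the next
 * of the SBQ_WAIT_QUEUES queues, and wakes up to 8 exclusive waiters in
 * one go.
 */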

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the cleared bit is set, the bit may be reallocated.
	 *
	 * This orders reads/writes on the associated instance (such as a
	 * blk-mq request) against re-allocation via this bit; its pair is
	 * the memory barrier implied in __sbitmap_get_word().
	 *
	 * One invariant is that the cleared bit must be zero while the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
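
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the waiter-side pattern these helpers are built for, modelled on what
 * blk-mq does when tags run out. demo_wait_index and demo_wait_for_tag
 * are hypothetical names; assumes the caller may sleep.
 */
#if 0
static atomic_t demo_wait_index = ATOMIC_INIT(0);	/* wake queue rotor */

static int demo_wait_for_tag(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &demo_wait_index);
	DEFINE_SBQ_WAIT(wait);
	int tag;

	do {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		/* Retry after queueing: a tag may have been freed meanwhile. */
		tag = __sbitmap_queue_get(sbq);
		if (tag >= 0)
			break;
		io_schedule();	/* woken by sbitmap_queue_wake_up() */
	} while (1);
	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}
#endif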