#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)

#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10

struct mlx5_fc_cache {
        u64 packets;
        u64 bytes;
        u64 lastuse;
};

struct mlx5_fc {
        struct list_head list;
        struct llist_node addlist;
        struct llist_node dellist;

        /* last{packets,bytes} hold the values reported to the caller on the
         * previous mlx5_fc_query_cached() call, so only the delta since the
         * last read is returned.
         */
        u64 lastpackets;
        u64 lastbytes;

        struct mlx5_fc_bulk *bulk;
        u32 id;
        bool aging;

        struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};

static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);

/* Locking and list handling:
 *
 * Callers are responsible for serializing create/destroy of a given counter
 * and for not dumping a counter after it has been destroyed.
 *
 * - create (user context): the counter is pushed onto the lockless addlist
 *   and the work item is scheduled; mlx5_fc_stats_work() later moves it to
 *   the id-sorted counters list, after which it starts being queried.
 * - destroy (user context): the counter is pushed onto the lockless dellist
 *   and the work item is scheduled; the work removes it from the counters
 *   list and the IDR and releases it.
 * - query (single-threaded workqueue context): the work walks the counters
 *   list and refreshes each counter's cache. Reading the cache from user
 *   context is not atomic with the update, so packets and bytes may be
 *   momentarily inconsistent with each other.
 */
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
                                                      u32 id)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        unsigned long next_id = (unsigned long)id + 1;
        struct mlx5_fc *counter;
        unsigned long tmp;

        rcu_read_lock();
        /* skip counters that are in the IDR but not yet on the counters list */
        idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
                                       counter, tmp, next_id) {
                if (!list_empty(&counter->list))
                        break;
        }
        rcu_read_unlock();

        return counter ? &counter->list : &fc_stats->counters;
}

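/* The counters list is kept sorted by counter id: mlx5_fc_stats_insert() uses
 * the IDR lookup above to find the insertion point, so that the query work
 * can walk the list in contiguous, bulk-query-friendly id ranges.
 */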
static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
                                 struct mlx5_fc *counter)
{
        struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);

        list_add_tail(&counter->list, next);
}

static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
                                 struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        list_del(&counter->list);

        spin_lock(&fc_stats->counters_idr_lock);
        WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
        spin_unlock(&fc_stats->counters_idr_lock);
}

static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
        return min_t(int, MLX5_INIT_COUNTERS_BULK,
                     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
        return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
                     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static void update_counter_cache(int index, u32 *bulk_raw_data,
                                 struct mlx5_fc_cache *cache)
{
        void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
                                   flow_statistics[index]);
        u64 packets = MLX5_GET64(traffic_counter, stats, packets);
        u64 bytes = MLX5_GET64(traffic_counter, stats, octets);

        if (cache->packets == packets)
                return;

        cache->packets = packets;
        cache->bytes = bytes;
        cache->lastuse = jiffies;
}

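/* Walk the id-sorted counters list starting at @first and refresh the cache
 * of every counter up to @last_id, issuing firmware bulk queries whose base
 * id is aligned down to 4 and whose length is at most bulk_query_len.
 */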
static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
                                              struct mlx5_fc *first,
                                              u32 last_id)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        bool query_more_counters = (first->id <= last_id);
        int cur_bulk_len = fc_stats->bulk_query_len;
        u32 *data = fc_stats->bulk_query_out;
        struct mlx5_fc *counter = first;
        u32 bulk_base_id;
        int bulk_len;
        int err;

        while (query_more_counters) {
                /* first id must be aligned to 4 when using bulk query */
                bulk_base_id = counter->id & ~0x3;

                /* number of counters to query inc. the last counter */
                bulk_len = min_t(int, cur_bulk_len,
                                 ALIGN(last_id - bulk_base_id + 1, 4));

                err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
                                             data);
                if (err) {
                        mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
                        return;
                }
                query_more_counters = false;

                list_for_each_entry_from(counter, &fc_stats->counters, list) {
                        int counter_index = counter->id - bulk_base_id;
                        struct mlx5_fc_cache *cache = &counter->cache;

                        if (counter->id >= bulk_base_id + bulk_len) {
                                query_more_counters = true;
                                break;
                        }

                        update_counter_cache(counter_index, data, cache);
                }
        }
}

static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
        mlx5_cmd_fc_free(dev, counter->id);
        kfree(counter);
}

static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        if (counter->bulk)
                mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
        else
                mlx5_fc_free(dev, counter);
}

static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        int max_bulk_len = get_max_bulk_query_len(dev);
        unsigned long now = jiffies;
        u32 *bulk_query_out_tmp;
        int max_out_len;

        if (fc_stats->bulk_query_alloc_failed &&
            time_before(now, fc_stats->next_bulk_query_alloc))
                return;

        max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
        bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
        if (!bulk_query_out_tmp) {
                mlx5_core_warn_once(dev,
                                    "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
                                    max_bulk_len);
                fc_stats->bulk_query_alloc_failed = true;
                fc_stats->next_bulk_query_alloc =
                        now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
                return;
        }

        kfree(fc_stats->bulk_query_out);
        fc_stats->bulk_query_out = bulk_query_out_tmp;
        fc_stats->bulk_query_len = max_bulk_len;
        if (fc_stats->bulk_query_alloc_failed) {
                mlx5_core_info(dev,
                               "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
                               max_bulk_len);
                fc_stats->bulk_query_alloc_failed = false;
        }
}

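/* Periodic work: drains the add/del llists into the sorted counters list,
 * grows the bulk query buffer once enough counters exist, and, at most once
 * per sampling interval, refreshes the cache of every counter on the list.
 */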
static void mlx5_fc_stats_work(struct work_struct *work)
{
        struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
                                                 priv.fc_stats.work.work);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        /* Take dellist first to ensure that counters cannot be deleted before
         * they are inserted.
         */
        struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
        struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
        struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
        unsigned long now = jiffies;

        if (addlist || !list_empty(&fc_stats->counters))
                queue_delayed_work(fc_stats->wq, &fc_stats->work,
                                   fc_stats->sampling_interval);

        llist_for_each_entry(counter, addlist, addlist) {
                mlx5_fc_stats_insert(dev, counter);
                fc_stats->num_counters++;
        }

        llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
                mlx5_fc_stats_remove(dev, counter);

                mlx5_fc_release(dev, counter);
                fc_stats->num_counters--;
        }

        if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
            fc_stats->num_counters > get_init_bulk_query_len(dev))
                mlx5_fc_stats_bulk_query_size_increase(dev);

        if (time_before(now, fc_stats->next_query) ||
            list_empty(&fc_stats->counters))
                return;
        last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);

        counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
                                   list);
        if (counter)
                mlx5_fc_stats_query_counter_range(dev, counter, last->id);

        fc_stats->next_query = now + fc_stats->sampling_interval;
}

static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
        struct mlx5_fc *counter;
        int err;

        counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return ERR_PTR(-ENOMEM);

        err = mlx5_cmd_fc_alloc(dev, &counter->id);
        if (err) {
                kfree(counter);
                return ERR_PTR(err);
        }

        return counter;
}

static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct mlx5_fc *counter;

        if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
                counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
                if (!IS_ERR(counter))
                        return counter;
        }

        return mlx5_fc_single_alloc(dev);
}

struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
{
        struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        int err;

        if (IS_ERR(counter))
                return counter;

        INIT_LIST_HEAD(&counter->list);
        counter->aging = aging;

        if (aging) {
                u32 id = counter->id;

                counter->cache.lastuse = jiffies;
                counter->lastbytes = counter->cache.bytes;
                counter->lastpackets = counter->cache.packets;

                idr_preload(GFP_KERNEL);
                spin_lock(&fc_stats->counters_idr_lock);

                err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
                                    GFP_NOWAIT);

                spin_unlock(&fc_stats->counters_idr_lock);
                idr_preload_end();
                if (err)
                        goto err_out_alloc;

                llist_add(&counter->addlist, &fc_stats->addlist);
        }

        return counter;

err_out_alloc:
        mlx5_fc_release(dev, counter);
        return ERR_PTR(err);
}

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
        struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        if (aging)
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
        return counter;
}
EXPORT_SYMBOL(mlx5_fc_create);

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
        return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);
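
/* Typical usage (sketch): allocate an aging counter, point a flow rule's
 * counter destination at it via mlx5_fc_id(), and read the software cache
 * periodically:
 *
 *	struct mlx5_fc *fc = mlx5_fc_create(dev, true);
 *	...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	...
 *	mlx5_fc_destroy(dev, fc);
 */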

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        if (!counter)
                return;

        if (counter->aging) {
                llist_add(&counter->dellist, &fc_stats->dellist);
                mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
                return;
        }

        mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        int init_bulk_len;
        int init_out_len;

        spin_lock_init(&fc_stats->counters_idr_lock);
        idr_init(&fc_stats->counters_idr);
        INIT_LIST_HEAD(&fc_stats->counters);
        init_llist_head(&fc_stats->addlist);
        init_llist_head(&fc_stats->dellist);

        init_bulk_len = get_init_bulk_query_len(dev);
        init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
        fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
        if (!fc_stats->bulk_query_out)
                return -ENOMEM;
        fc_stats->bulk_query_len = init_bulk_len;

        fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
        if (!fc_stats->wq)
                goto err_wq_create;

        fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
        INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

        mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
        return 0;

err_wq_create:
        kfree(fc_stats->bulk_query_out);
        return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
        struct llist_node *tmplist;
        struct mlx5_fc *counter;
        struct mlx5_fc *tmp;

        cancel_delayed_work_sync(&dev->priv.fc_stats.work);
        destroy_workqueue(dev->priv.fc_stats.wq);
        dev->priv.fc_stats.wq = NULL;

        tmplist = llist_del_all(&fc_stats->addlist);
        llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
                mlx5_fc_release(dev, counter);

        list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
                mlx5_fc_release(dev, counter);

        mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
        idr_destroy(&fc_stats->counters_idr);
        kfree(fc_stats->bulk_query_out);
}

int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
                  u64 *packets, u64 *bytes)
{
        return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
        return counter->cache.lastuse;
}

void mlx5_fc_query_cached(struct mlx5_fc *counter,
                          u64 *bytes, u64 *packets, u64 *lastuse)
{
        struct mlx5_fc_cache c;

        c = counter->cache;

        *bytes = c.bytes - counter->lastbytes;
        *packets = c.packets - counter->lastpackets;
        *lastuse = c.lastuse;

        counter->lastbytes = c.bytes;
        counter->lastpackets = c.packets;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
                              struct delayed_work *dwork,
                              unsigned long delay)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        queue_delayed_work(fc_stats->wq, dwork, delay);
}

void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
                                      unsigned long interval)
{
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

        fc_stats->sampling_interval = min_t(unsigned long, interval,
                                            fc_stats->sampling_interval);
}

/* Flow counter bulks */

struct mlx5_fc_bulk {
        struct list_head pool_list;
        u32 base_id;
        int bulk_len;
        unsigned long *bitmask;
        struct mlx5_fc fcs[];
};
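
/* A bulk is a single firmware allocation of multiple counters with
 * consecutive ids starting at base_id. Free slots are tracked in the
 * bitmask; the embedded fcs[] array backs the individual struct mlx5_fc
 * objects handed out by the pool.
 */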

static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
                         u32 id)
{
        counter->bulk = bulk;
        counter->id = id;
}

static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
{
        return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}

static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
{
        enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
        struct mlx5_fc_bulk *bulk;
        int err = -ENOMEM;
        int bulk_len;
        u32 base_id;
        int i;

        alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
        bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

        bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
        if (!bulk)
                goto err_alloc_bulk;

        bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
                                 GFP_KERNEL);
        if (!bulk->bitmask)
                goto err_alloc_bitmask;

        err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
        if (err)
                goto err_mlx5_cmd_bulk_alloc;

        bulk->base_id = base_id;
        bulk->bulk_len = bulk_len;
        for (i = 0; i < bulk_len; i++) {
                mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
                set_bit(i, bulk->bitmask);
        }

        return bulk;

err_mlx5_cmd_bulk_alloc:
        kvfree(bulk->bitmask);
err_alloc_bitmask:
        kvfree(bulk);
err_alloc_bulk:
        return ERR_PTR(err);
}

static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
{
        if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
                mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
                return -EBUSY;
        }

        mlx5_cmd_fc_free(dev, bulk->base_id);
        kvfree(bulk->bitmask);
        kvfree(bulk);

        return 0;
}

static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
{
        int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);

        if (free_fc_index >= bulk->bulk_len)
                return ERR_PTR(-ENOSPC);

        clear_bit(free_fc_index, bulk->bitmask);
        return &bulk->fcs[free_fc_index];
}

static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
{
        int fc_index = fc->id - bulk->base_id;

        if (test_bit(fc_index, bulk->bitmask))
                return -EINVAL;

        set_bit(fc_index, bulk->bitmask);
        return 0;
}

/* Flow counters pool API */

static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
{
        fc_pool->dev = dev;
        mutex_init(&fc_pool->pool_lock);
        INIT_LIST_HEAD(&fc_pool->fully_used);
        INIT_LIST_HEAD(&fc_pool->partially_used);
        INIT_LIST_HEAD(&fc_pool->unused);
        fc_pool->available_fcs = 0;
        fc_pool->used_fcs = 0;
        fc_pool->threshold = 0;
}

static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
{
        struct mlx5_core_dev *dev = fc_pool->dev;
        struct mlx5_fc_bulk *bulk;
        struct mlx5_fc_bulk *tmp;

        list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
                mlx5_fc_bulk_destroy(dev, bulk);
        list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
                mlx5_fc_bulk_destroy(dev, bulk);
        list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
                mlx5_fc_bulk_destroy(dev, bulk);
}

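/* Keep a slack of up to one tenth of the counters currently in use (capped at
 * MLX5_FC_POOL_MAX_THRESHOLD); a fully free bulk is only destroyed once the
 * number of available counters exceeds this threshold.
 */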
static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
{
        fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
                                   fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
}

static struct mlx5_fc_bulk *
mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
{
        struct mlx5_core_dev *dev = fc_pool->dev;
        struct mlx5_fc_bulk *new_bulk;

        new_bulk = mlx5_fc_bulk_create(dev);
        if (!IS_ERR(new_bulk))
                fc_pool->available_fcs += new_bulk->bulk_len;
        mlx5_fc_pool_update_threshold(fc_pool);
        return new_bulk;
}

static void
mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
{
        struct mlx5_core_dev *dev = fc_pool->dev;

        fc_pool->available_fcs -= bulk->bulk_len;
        mlx5_fc_bulk_destroy(dev, bulk);
        mlx5_fc_pool_update_threshold(fc_pool);
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
                               struct list_head *next_list,
                               bool move_non_full_bulk)
{
        struct mlx5_fc_bulk *bulk;
        struct mlx5_fc *fc;

        if (list_empty(src_list))
                return ERR_PTR(-ENODATA);

        bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
        fc = mlx5_fc_bulk_acquire_fc(bulk);
        if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
                list_move(&bulk->pool_list, next_list);
        return fc;
}

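/* Acquisition order: take a slot from a partially used bulk first, then from
 * an unused bulk, and only allocate a new bulk from firmware when neither is
 * available. Bulks migrate between the pool lists as their occupancy changes.
 */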
static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
{
        struct mlx5_fc_bulk *new_bulk;
        struct mlx5_fc *fc;

        mutex_lock(&fc_pool->pool_lock);

        fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
                                            &fc_pool->fully_used, false);
        if (IS_ERR(fc))
                fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
                                                    &fc_pool->partially_used,
                                                    true);
        if (IS_ERR(fc)) {
                new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
                if (IS_ERR(new_bulk)) {
                        fc = ERR_CAST(new_bulk);
                        goto out;
                }
                fc = mlx5_fc_bulk_acquire_fc(new_bulk);
                list_add(&new_bulk->pool_list, &fc_pool->partially_used);
        }
        fc_pool->available_fcs--;
        fc_pool->used_fcs++;

out:
        mutex_unlock(&fc_pool->pool_lock);
        return fc;
}

static void
mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
{
        struct mlx5_core_dev *dev = fc_pool->dev;
        struct mlx5_fc_bulk *bulk = fc->bulk;
        int bulk_free_fcs_amount;

        mutex_lock(&fc_pool->pool_lock);

        if (mlx5_fc_bulk_release_fc(bulk, fc)) {
                mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
                goto unlock;
        }

        fc_pool->available_fcs++;
        fc_pool->used_fcs--;

        bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
        if (bulk_free_fcs_amount == 1)
                list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
        if (bulk_free_fcs_amount == bulk->bulk_len) {
                list_del(&bulk->pool_list);
                if (fc_pool->available_fcs > fc_pool->threshold)
                        mlx5_fc_pool_free_bulk(fc_pool, bulk);
                else
                        list_add(&bulk->pool_list, &fc_pool->unused);
        }

unlock:
        mutex_unlock(&fc_pool->pool_lock);
}