#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"

struct workqueue_struct *rds_ib_mr_wq;

struct rds_ib_dereg_odp_mr {
	struct work_struct work;
	struct ib_mr *mr;
};

static void rds_ib_odp_mr_worker(struct work_struct *work);

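/* Look up the rds_ib_device currently serving @ipaddr. Takes a reference
 * on the device; the caller must drop it with rds_ib_dev_put().
 */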
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

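/* Record that @ipaddr is reachable through @rds_ibdev. */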
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof(*i_ipaddr), GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

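/* Drop the @ipaddr binding from @rds_ibdev. The entry is freed only after
 * an RCU grace period so concurrent lockless readers stay safe.
 */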
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

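/* Bind @ipaddr (an IPv4-mapped address; only s6_addr32[3] is used) to
 * @rds_ibdev, removing any stale binding to a different device first.
 */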
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

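/* Report the 1M pool limits through the rds-info RDMA connection interface. */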
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif

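/* Grab an already-registered MR from the pool's clean list, if one is
 * available, instead of registering a new one.
 */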
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long flags;

	spin_lock_irqsave(&pool->clean_lock, flags);
	ret = llist_del_first(&pool->clean_list);
	spin_unlock_irqrestore(&pool->clean_lock, flags);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	return ibmr;
}

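/* DMA-sync the MR's pages for CPU or device access. ODP MRs need no
 * explicit sync, so they are skipped.
 */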
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->odp)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

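/* Unmap the MR's scatterlist from the device and release the pinned pages. */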
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR
			 */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

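/* When tearing a pool down (free_all) every allocated MR must be freed;
 * otherwise the flush has no minimum free target.
 */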
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Move all MRs on an llist onto the given list_head for further processing.
 * Returns the number of MRs moved.
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * Turn a list_head of MRs back into a chain of llist nodes, returning the
 * head and the tail so the whole chain can be spliced onto an llist with a
 * single llist_add_batch() call.
 */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all) {
		unsigned long flags;

		spin_lock_irqsave(&pool->clean_lock, flags);
		llist_append_to_list(&pool->clean_list, &unmap_list);
		spin_unlock_irqrestore(&pool->clean_lock, flags);
	}

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		unsigned long flags;

		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret) {
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
			clean_nodes = clean_nodes->next;
		}
		/* more than one entry in llist nodes */
		if (clean_nodes) {
			spin_lock_irqsave(&pool->clean_lock, flags);
			llist_add_batch(clean_nodes, clean_tail,
					&pool->clean_list);
			spin_unlock_irqrestore(&pool->clean_lock, flags);
		}
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

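/* Get an MR for a new registration: reuse a clean one if possible, otherwise
 * reserve a slot in the pool, flushing the pool if it is depleted. A NULL
 * return with the slot reserved tells the caller to allocate a fresh MR.
 */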
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			break;
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return NULL;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

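/* Transport hook called when an application releases an MR. The MR is
 * queued for unmapping and reuse rather than being destroyed immediately.
 */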
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	if (ibmr->odp) {
		/* A MR created and marked as use_once. We use delayed work,
		 * because there is a chance that we are in interrupt and can't
		 * call to ib_dereg_mr() directly.
		 */
		INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
		queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
		return;
	}

	/* Return it to the pool's free list */
	rds_ib_free_frmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

u32 rds_ib_get_lkey(void *trans_private)
{
	struct rds_ib_mr *ibmr = trans_private;

	return ibmr->u.mr->lkey;
}

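/* Transport hook that registers @sg (or an ODP region) on behalf of a socket
 * and returns the handle used for subsequent RDMA operations.
 */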
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret,
		    struct rds_connection *conn,
		    u64 start, u64 length, int need_odp)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
		u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
		int access_flags =
			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
			 IB_ACCESS_ON_DEMAND);
		struct ib_sge sge = {};
		struct ib_mr *ib_mr;

		if (!rds_ibdev->odp_capable) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
				       access_flags);
		if (IS_ERR(ib_mr)) {
			ret = PTR_ERR(ib_mr);
			rdsdebug("ib_reg_user_mr returned %d\n", ret);
			goto out;
		}
		if (key_ret)
			*key_ret = ib_mr->rkey;

		ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
		if (!ibmr) {
			ib_dereg_mr(ib_mr);
			ret = -ENOMEM;
			goto out;
		}
		ibmr->u.mr = ib_mr;
		ibmr->odp = 1;

		sge.addr = virt_addr;
		sge.length = length;
		sge.lkey = ib_mr->lkey;

		ib_advise_mr(rds_ibdev->pd,
			     IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
		return ibmr;
	}

	if (conn)
		ic = conn->c_transport_data;

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	if (IS_ERR(ibmr)) {
		ret = PTR_ERR(ibmr);
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
	} else {
		return ibmr;
	}

out:
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ERR_PTR(ret);
}

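/* Tear down an MR pool; all MRs must have been returned by this point. */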
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

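/* Allocate and initialise an 8K or 1M MR pool for @rds_ibdev. */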
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	spin_lock_init(&pool->clean_lock);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

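/* Create the workqueue used to flush MR pools and deregister ODP MRs. */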
int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

static void rds_ib_odp_mr_worker(struct work_struct *work)
{
	struct rds_ib_mr *ibmr;

	ibmr = container_of(work, struct rds_ib_mr, work.work);
	ib_dereg_mr(ibmr->u.mr);
	kfree(ibmr);
}