0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #include <linux/slab.h>
0035 #include <linux/errno.h>
0036
0037 #include "mthca_dev.h"
0038 #include "mthca_cmd.h"
0039 #include "mthca_memfree.h"
0040
/*
 * Software handle for a contiguous range of MTT (Memory Translation
 * Table) segments allocated from a buddy allocator.
 */
struct mthca_mtt {
	struct mthca_buddy *buddy;	/* allocator this range came from */
	int order;			/* range covers 2^order segments */
	u32 first_seg;			/* index of the first segment */
};
0046
0047
0048
0049
/*
 * Hardware layout of an MPT (Memory Protection Table) entry as passed
 * to the SW2HW_MPT firmware command.  All multi-byte fields are
 * big-endian; __packed keeps the layout byte-exact for the HCA.
 */
struct mthca_mpt_entry {
	__be32 flags;			/* MTHCA_MPT_FLAG_* | access bits */
	__be32 page_size;		/* log2(page size) - 12 (see mthca_mr_alloc()) */
	__be32 key;
	__be32 pd;
	__be64 start;			/* region start virtual address (iova) */
	__be64 length;			/* region length in bytes */
	__be32 lkey;			/* fields from here down are zeroed by mthca_mr_alloc() */
	__be32 window_count;
	__be32 window_count_limit;
	__be64 mtt_seg;			/* address of first MTT segment for this region */
	__be32 mtt_sz;			/* NOTE(review): presumably Arbel-only — confirm */
	u32 reserved[2];
} __packed;
0064
0065 #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
0066 #define MTHCA_MPT_FLAG_MIO (1 << 17)
0067 #define MTHCA_MPT_FLAG_BIND_ENABLE (1 << 15)
0068 #define MTHCA_MPT_FLAG_PHYSICAL (1 << 9)
0069 #define MTHCA_MPT_FLAG_REGION (1 << 8)
0070
0071 #define MTHCA_MTT_FLAG_PRESENT 1
0072
0073 #define MTHCA_MPT_STATUS_SW 0xF0
0074 #define MTHCA_MPT_STATUS_HW 0x00
0075
0076 #define SINAI_FMR_KEY_INC 0x1000000
0077
0078
0079
0080
0081
0082
0083
/*
 * Allocate a naturally-aligned run of 2^order segments from the buddy
 * allocator.  Returns the index of the first segment, or (u32)-1 on
 * failure (callers compare the result against -1).
 */
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	/* Find the smallest order >= the request that has a free block. */
	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

found:
	__clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order, freeing each buddy. */
	while (o > order) {
		--o;
		seg <<= 1;
		__set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	/* Convert block index at 'order' into an absolute segment index. */
	seg <<= order;

	return seg;
}
0120
/* Return a 2^order-segment block starting at @seg to the allocator. */
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
	seg >>= order;		/* segment index -> block index at 'order' */

	spin_lock(&buddy->lock);

	/* Coalesce upwards as long as the buddy block is also free. */
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		__clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	__set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
0139
/*
 * Initialize a buddy allocator that manages 2^max_order segments:
 * one free-block bitmap per order, with the single top-order block
 * initially free.  Returns 0 on success or -ENOMEM.
 */
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	/* Order i tracks 2^(max_order - i) blocks. */
	for (i = 0; i <= buddy->max_order; ++i) {
		buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
					       GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
	}

	__set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	/* bits[] was zeroed by kcalloc, so bitmap_free(NULL) is safe here. */
	for (i = 0; i <= buddy->max_order; ++i)
		bitmap_free(buddy->bits[i]);

err_out:
	/* kfree(NULL) is a no-op, so partial failures are handled too. */
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}
0176
0177 static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
0178 {
0179 int i;
0180
0181 for (i = 0; i <= buddy->max_order; ++i)
0182 bitmap_free(buddy->bits[i]);
0183
0184 kfree(buddy->bits);
0185 kfree(buddy->num_free);
0186 }
0187
/*
 * Allocate 2^order MTT segments from @buddy and, on memfree (Arbel)
 * HCAs, make sure the backing ICM table range is mapped.  Returns the
 * first segment index, or (u32)-1 on failure.
 */
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
				 struct mthca_buddy *buddy)
{
	u32 seg = mthca_buddy_alloc(buddy, order);

	if (seg == -1)
		return -1;

	if (mthca_is_memfree(dev))
		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
					  seg + (1 << order) - 1)) {
			/* Couldn't map the ICM pages: give the segments back. */
			mthca_buddy_free(buddy, seg, order);
			seg = -1;
		}

	return seg;
}
0205
/*
 * Allocate an MTT range big enough for @size 8-byte MTT entries from
 * @buddy and return a handle for it.  Returns an ERR_PTR on failure;
 * free with mthca_free_mtt().
 */
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
					   struct mthca_buddy *buddy)
{
	struct mthca_mtt *mtt;
	int i;

	if (size <= 0)
		return ERR_PTR(-EINVAL);

	mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->buddy = buddy;
	mtt->order = 0;
	/*
	 * Pick the smallest order such that 2^order segments (of
	 * mtt_seg_size / 8 entries each) hold @size entries.
	 */
	for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
		++mtt->order;

	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
	if (mtt->first_seg == -1) {
		kfree(mtt);
		return ERR_PTR(-ENOMEM);
	}

	return mtt;
}
0232
0233 struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
0234 {
0235 return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
0236 }
0237
/*
 * Free an MTT range handle.  @mtt may be NULL (e.g. for physical MRs
 * allocated with no translation), in which case this is a no-op.
 */
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
	if (!mtt)
		return;

	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

	/* NOTE(review): called unconditionally, unlike the memfree-only
	 * get in mthca_alloc_mtt_range() — presumably a no-op on Tavor. */
	mthca_table_put_range(dev, dev->mr_table.mtt_table,
			      mtt->first_seg,
			      mtt->first_seg + (1 << mtt->order) - 1);

	kfree(mtt);
}
0251
/*
 * Write @list_len MTT entries starting at @start_index using the
 * WRITE_MTT firmware command, chunked to fit the command mailbox.
 * Returns 0 on success or a negative errno.
 */
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
			     int start_index, u64 *buffer_list, int list_len)
{
	struct mthca_mailbox *mailbox;
	__be64 *mtt_entry;
	int err = 0;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mtt_entry = mailbox->buf;

	while (list_len > 0) {
		/* Slot 0: target MTT address; slot 1: reserved, must be 0. */
		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
					   mtt->first_seg * dev->limits.mtt_seg_size +
					   start_index * 8);
		mtt_entry[1] = 0;
		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
			mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
						       MTHCA_MTT_FLAG_PRESENT);

		/*
		 * If we have an odd number of entries to write, add
		 * one more dummy entry, since the command below always
		 * writes an even (rounded-up) number of entries.
		 */
		if (i & 1)
			mtt_entry[i + 2] = 0;

		err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
		if (err) {
			mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
			goto out;
		}

		list_len -= i;
		start_index += i;
		buffer_list += i;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
0296
/*
 * Return the maximum number of MTT entries that can be written in a
 * single chunk by mthca_write_mtt().
 */
int mthca_write_mtt_size(struct mthca_dev *dev)
{
	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		/*
		 * Mailbox (WRITE_MTT command) path: leave two 64-bit
		 * slots at the start of the mailbox for the target
		 * address and the reserved field.
		 */
		return PAGE_SIZE / sizeof (u64) - 2;

	/*
	 * Direct-write path: on memfree (Arbel) HCAs a chunk must not
	 * cross a page (see the BUG_ON in mthca_arbel_write_mtt_seg());
	 * on Tavor there is effectively no limit.
	 */
	return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}
0312
/*
 * Tavor: write MTT entries directly through the ioremapped MTT table
 * (dev->mr_table.tavor_fmr.mtt_base, set up in mthca_init_mr_table()).
 */
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	u64 __iomem *mtts;
	int i;

	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
		start_index * sizeof (u64);
	for (i = 0; i < list_len; ++i)
		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
				  mtts + i);
}
0326
/*
 * Arbel (memfree): locate the ICM page backing the MTT entries and
 * write them with the CPU, bracketing the writes with DMA syncs so
 * the HCA sees consistent data.
 */
static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;
	int s = start_index * sizeof (u64);

	/* All MTTs being written must fit within the same page. */
	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);

	/* Require whole segments: start must be segment-aligned. */
	BUG_ON(s % dev->limits.mtt_seg_size);

	mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
				s / dev->limits.mtt_seg_size, &dma_handle);

	BUG_ON(!mtts);

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				list_len * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < list_len; ++i)
		mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   list_len * sizeof (u64), DMA_TO_DEVICE);
}
0355
/*
 * Write @list_len buffer addresses into an MTT range starting at
 * @start_index.  Uses the WRITE_MTT mailbox command unless FMR-style
 * direct writes are available, in which case entries are written in
 * chunks bounded by mthca_write_mtt_size().  Returns 0 or -errno.
 */
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
		    int start_index, u64 *buffer_list, int list_len)
{
	int size = mthca_write_mtt_size(dev);
	int chunk;

	/* Same condition as in mthca_write_mtt_size(): mailbox path. */
	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

	while (list_len > 0) {
		chunk = min(size, list_len);
		if (mthca_is_memfree(dev))
			mthca_arbel_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);
		else
			mthca_tavor_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);

		list_len -= chunk;
		start_index += chunk;
		buffer_list += chunk;
	}

	return 0;
}
0382
/* On Tavor the memory key is simply the MPT index. */
static inline u32 tavor_hw_index_to_key(u32 ind)
{
	return ind;
}
0387
/* Inverse of tavor_hw_index_to_key(): identity mapping. */
static inline u32 tavor_key_to_hw_index(u32 key)
{
	return key;
}
0392
0393 static inline u32 arbel_hw_index_to_key(u32 ind)
0394 {
0395 return (ind >> 24) | (ind << 8);
0396 }
0397
0398 static inline u32 arbel_key_to_hw_index(u32 key)
0399 {
0400 return (key << 24) | (key >> 8);
0401 }
0402
0403 static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
0404 {
0405 if (mthca_is_memfree(dev))
0406 return arbel_hw_index_to_key(ind);
0407 else
0408 return tavor_hw_index_to_key(ind);
0409 }
0410
0411 static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
0412 {
0413 if (mthca_is_memfree(dev))
0414 return arbel_key_to_hw_index(key);
0415 else
0416 return tavor_key_to_hw_index(key);
0417 }
0418
/*
 * For Sinai key-throughput optimization, remap the MPT index: bit 3
 * of the index is moved up to bit 23 and the low 23 bits are kept.
 */
static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		return ((key << 20) & 0x800000) | (key & 0x7fffff);
	else
		return key;
}
0426
/*
 * Allocate an MPT entry and hand the region to the HCA via SW2HW_MPT.
 * @buffer_size_shift is log2 of the page size backing the region;
 * mr->mtt must already be set up (or NULL for a physical region).
 * Sets mr->ibmr.lkey/rkey.  Returns 0 on success or a negative errno.
 */
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
	struct mthca_mailbox *mailbox;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int i;
	int err;

	/* page_size below encodes buffer_size_shift - 12 in 32 bits */
	WARN_ON(buffer_size_shift >= 32);

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		/* Make sure the ICM page backing this MPT entry is mapped. */
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_table;
	}
	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
				       MTHCA_MPT_FLAG_MIO |
				       MTHCA_MPT_FLAG_REGION |
				       access);
	if (!mr->mtt)
		/* No MTT: region maps physical addresses directly. */
		mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
	mpt_entry->key = cpu_to_be32(key);
	mpt_entry->pd = cpu_to_be32(pd);
	mpt_entry->start = cpu_to_be64(iova);
	mpt_entry->length = cpu_to_be64(total_size);

	/* Zero everything from lkey to the end of the entry. */
	memset_startat(mpt_entry, 0, lkey);

	if (mr->mtt)
		mpt_entry->mtt_seg =
			cpu_to_be64(dev->mr_table.mtt_base +
				    mr->mtt->first_seg * dev->limits.mtt_seg_size);

	if (0) {
		/* Debug dump of the MPT entry; deliberately compiled out. */
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1));
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox;
	}

	mthca_free_mailbox(dev, mailbox);
	return err;

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_table:
	/* NOTE(review): reached on Tavor too, where no table_get was done —
	 * presumably mthca_table_put() is a no-op there; confirm. */
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
0508
/*
 * Allocate a no-translation (physical) MR spanning the whole address
 * space: no MTT, 4K page shift, iova 0, length ~0ULL.
 */
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
			   u32 access, struct mthca_mr *mr)
{
	mr->mtt = NULL;
	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}
0515
/*
 * Allocate an MR backed by an explicit list of buffer addresses:
 * allocate an MTT range, write the buffer list into it, then create
 * the MPT entry.  On failure the MTT is freed.  Returns 0 or -errno.
 */
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
			u64 *buffer_list, int buffer_size_shift,
			int list_len, u64 iova, u64 total_size,
			u32 access, struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err) {
		mthca_free_mtt(dev, mr->mtt);
		return err;
	}

	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		mthca_free_mtt(dev, mr->mtt);

	return err;
}
0540
0541
/* Release the MPT resources (ICM reference and index) behind @lkey. */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
	mthca_table_put(dev, dev->mr_table.mpt_table,
			key_to_hw_index(dev, lkey));

	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
0549
/*
 * Destroy an MR: reclaim the MPT entry from the HCA with HW2SW_MPT
 * (failure is only logged), then free the MPT index and any MTT range.
 */
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
	int err;

	err = mthca_HW2SW_MPT(dev, NULL,
			      key_to_hw_index(dev, mr->ibmr.lkey) &
			      (dev->limits.num_mpts - 1));
	if (err)
		mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);

	mthca_free_region(dev, mr->ibmr.lkey);
	mthca_free_mtt(dev, mr->mtt);
}
0563
/*
 * Set up the MR table: the MPT index allocator, the MTT buddy
 * allocator(s), the Tavor FMR ioremaps, and the reservation of
 * firmware-reserved MTT segments.  Returns 0 or a negative errno.
 */
int mthca_init_mr_table(struct mthca_dev *dev)
{
	phys_addr_t addr;
	int mpts, mtts, err, i;

	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
			       dev->limits.num_mpts,
			       ~0, dev->limits.reserved_mrws);
	if (err)
		return err;

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
		/* Tavor with hidden DDR: disable FMRs entirely. */
		dev->limits.fmr_reserved_mtts = 0;
	else
		dev->mthca_flags |= MTHCA_FLAG_FMR;

	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mthca_dbg(dev, "Memory key throughput optimization activated.\n");

	err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
			       fls(dev->limits.num_mtt_segs - 1));

	if (err)
		goto err_mtt_buddy;

	dev->mr_table.tavor_fmr.mpt_base = NULL;
	dev->mr_table.tavor_fmr.mtt_base = NULL;

	if (dev->limits.fmr_reserved_mtts) {
		i = fls(dev->limits.fmr_reserved_mtts - 1);

		/* 1 << i must stay a positive int. */
		if (i >= 31) {
			mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
			err = -EINVAL;
			goto err_fmr_mpt;
		}
		mpts = mtts = 1 << i;
	} else {
		mtts = dev->limits.num_mtt_segs;
		mpts = dev->limits.num_mpts;
	}

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_FMR)) {
		/* Map the MPT and MTT tables in BAR 4 for direct FMR writes. */
		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mpt_base);

		dev->mr_table.tavor_fmr.mpt_base =
			ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

		if (!dev->mr_table.tavor_fmr.mpt_base) {
			mthca_warn(dev, "MPT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mpt;
		}

		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mtt_base);

		dev->mr_table.tavor_fmr.mtt_base =
			ioremap(addr, mtts * dev->limits.mtt_seg_size);
		if (!dev->mr_table.tavor_fmr.mtt_base) {
			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mtt;
		}
	}

	if (dev->limits.fmr_reserved_mtts) {
		err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_fmr_mtt_buddy;

		/*
		 * Take the FMR segments out of the main buddy so regular
		 * MRs can't use them.  The first allocation from a fresh
		 * buddy yields segment 0, so any nonzero return here
		 * (including (u32)-1 failure) is treated as an error.
		 */
		err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_reserve_fmr;

		dev->mr_table.fmr_mtt_buddy =
			&dev->mr_table.tavor_fmr.mtt_buddy;
	} else
		dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

	/* The reserved MTTs come out of the front of the FMR buddy. */
	if (dev->limits.reserved_mtts) {
		i = fls(dev->limits.reserved_mtts - 1);

		if (mthca_alloc_mtt_range(dev, i,
					  dev->mr_table.fmr_mtt_buddy) == -1) {
			mthca_warn(dev, "MTT table of order %d is too small.\n",
				   dev->mr_table.fmr_mtt_buddy->max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
err_reserve_fmr:
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

	return err;
}
0687
/* Tear down everything set up by mthca_init_mr_table(). */
void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
	/* XXX check if any MRs are still allocated? */
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}