// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
#include <linux/anon_inodes.h>

#include "cmd.h"

/* Arbitrary to prevent userspace from consuming endless memory */
#define MAX_MIGRATION_SIZE (512*1024*1024)

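/*
 * probe() stores the vfio_pci_core_device pointer in drvdata; recover the
 * wrapping mlx5 specific device from it.
 */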
static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct mlx5vf_pci_core_device,
			    core_device);
}

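/*
 * Return the page backing byte @offset of the migration data. The position of
 * the previous lookup is cached so that sequential accesses do not rescan the
 * scatter table from its head.
 */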
static struct page *
mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf,
			  unsigned long offset)
{
	unsigned long cur_offset = 0;
	struct scatterlist *sg;
	unsigned int i;

	/* All accesses are sequential */
	if (offset < migf->last_offset || !migf->last_offset_sg) {
		migf->last_offset = 0;
		migf->last_offset_sg = migf->table.sgt.sgl;
		migf->sg_last_entry = 0;
	}

	cur_offset = migf->last_offset;

	for_each_sg(migf->last_offset_sg, sg,
		    migf->table.sgt.orig_nents - migf->sg_last_entry, i) {
		if (offset < sg->length + cur_offset) {
			migf->last_offset_sg = sg;
			migf->sg_last_entry += i;
			migf->last_offset = cur_offset;
			return nth_page(sg_page(sg),
					(offset - cur_offset) / PAGE_SIZE);
		}
		cur_offset += sg->length;
	}
	return NULL;
}

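/*
 * Grow the migration data buffer by @npages pages. Pages are allocated in
 * bulk and appended to the file's scatter table.
 */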
static int mlx5vf_add_migration_pages(struct mlx5_vf_migration_file *migf,
				      unsigned int npages)
{
	unsigned int to_alloc = npages;
	struct page **page_list;
	unsigned long filled;
	unsigned int to_fill;
	int ret;

	to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
	page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	do {
		filled = alloc_pages_bulk_array(GFP_KERNEL, to_fill, page_list);
		if (!filled) {
			ret = -ENOMEM;
			goto err;
		}
		to_alloc -= filled;
		ret = sg_alloc_append_table_from_pages(
			&migf->table, page_list, filled, 0,
			filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
			GFP_KERNEL);

		if (ret)
			goto err;
		migf->allocated_length += filled * PAGE_SIZE;
		/* clean input for another bulk allocation */
		memset(page_list, 0, filled * sizeof(*page_list));
		to_fill = min_t(unsigned int, to_alloc,
				PAGE_SIZE / sizeof(*page_list));
	} while (to_alloc > 0);

	kvfree(page_list);
	return 0;

err:
	kvfree(page_list);
	return ret;
}

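/* Free all migration data pages and mark the file as no longer usable. */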
static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
{
	struct sg_page_iter sg_iter;

	mutex_lock(&migf->lock);
	/* Undo alloc_pages_bulk_array() */
	for_each_sgtable_page(&migf->table.sgt, &sg_iter, 0)
		__free_page(sg_page_iter_page(&sg_iter));
	sg_free_append_table(&migf->table);
	migf->disabled = true;
	migf->total_length = 0;
	migf->allocated_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}

static int mlx5vf_release_file(struct inode *inode, struct file *filp)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;

	mlx5vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}

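/*
 * read() handler for the save file: copy migration data out of the scatter
 * table pages to userspace. Unless the file was opened O_NONBLOCK, block
 * until data becomes available or an error is flagged.
 */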
static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
				loff_t *pos)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (!(filp->f_flags & O_NONBLOCK)) {
		if (wait_event_interruptible(migf->poll_wait,
			     READ_ONCE(migf->total_length) || migf->is_err))
			return -ERESTARTSYS;
	}

	mutex_lock(&migf->lock);
	if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) {
		done = -EAGAIN;
		goto out_unlock;
	}
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}
	if (migf->disabled || migf->is_err) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	while (len) {
		size_t page_offset;
		struct page *page;
		size_t page_len;
		u8 *from_buff;
		int ret;

		page_offset = (*pos) % PAGE_SIZE;
		page = mlx5vf_get_migration_page(migf, *pos - page_offset);
		if (!page) {
			if (done == 0)
				done = -EINVAL;
			goto out_unlock;
		}

		page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
		from_buff = kmap_local_page(page);
		ret = copy_to_user(buf, from_buff + page_offset, page_len);
		kunmap_local(from_buff);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += page_len;
		len -= page_len;
		done += page_len;
		buf += page_len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static __poll_t mlx5vf_save_poll(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &migf->poll_wait, wait);

	mutex_lock(&migf->lock);
	if (migf->disabled || migf->is_err)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (READ_ONCE(migf->total_length))
		pollflags = EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&migf->lock);

	return pollflags;
}

static const struct file_operations mlx5vf_save_fops = {
	.owner = THIS_MODULE,
	.read = mlx5vf_save_read,
	.poll = mlx5vf_save_poll,
	.release = mlx5vf_release_file,
	.llseek = no_llseek,
};

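/*
 * Create the save file: query the required device state size from firmware,
 * pre-allocate the data buffer and kick off saving the VHCA state. Readers of
 * the returned file block on poll_wait until data becomes available.
 */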
static struct mlx5_vf_migration_file *
mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
{
	struct mlx5_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	init_waitqueue_head(&migf->poll_wait);
	mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
	INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
	ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
						    &migf->total_length);
	if (ret)
		goto out_free;

	ret = mlx5vf_add_migration_pages(
		migf, DIV_ROUND_UP_ULL(migf->total_length, PAGE_SIZE));
	if (ret)
		goto out_free;

	migf->mvdev = mvdev;
	ret = mlx5vf_cmd_save_vhca_state(mvdev, migf);
	if (ret)
		goto out_free;
	return migf;
out_free:
	fput(migf->filp);
	return ERR_PTR(ret);
}

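/*
 * write() handler for the resume file: stage user provided migration data
 * into the scatter table pages, growing the buffer on demand up to
 * MAX_MIGRATION_SIZE.
 */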
static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
				   size_t len, loff_t *pos)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	loff_t requested_length;
	ssize_t done = 0;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > MAX_MIGRATION_SIZE)
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	if (migf->allocated_length < requested_length) {
		done = mlx5vf_add_migration_pages(
			migf,
			DIV_ROUND_UP(requested_length - migf->allocated_length,
				     PAGE_SIZE));
		if (done)
			goto out_unlock;
	}

	while (len) {
		size_t page_offset;
		struct page *page;
		size_t page_len;
		u8 *to_buff;
		int ret;

		page_offset = (*pos) % PAGE_SIZE;
		page = mlx5vf_get_migration_page(migf, *pos - page_offset);
		if (!page) {
			if (done == 0)
				done = -EINVAL;
			goto out_unlock;
		}

		page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
		to_buff = kmap_local_page(page);
		ret = copy_from_user(to_buff + page_offset, buf, page_len);
		kunmap_local(to_buff);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += page_len;
		len -= page_len;
		done += page_len;
		buf += page_len;
		migf->total_length += page_len;
	}
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations mlx5vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = mlx5vf_resume_write,
	.release = mlx5vf_release_file,
	.llseek = no_llseek,
};

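/* Create the file that userspace writes the migration data into while RESUMING. */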
static struct mlx5_vf_migration_file *
mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
{
	struct mlx5_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	return migf;
}

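/*
 * Tear down any open save/resume migration files. Outstanding async save work
 * is cancelled before the saving file is released.
 */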
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
{
	if (mvdev->resuming_migf) {
		mlx5vf_disable_fd(mvdev->resuming_migf);
		fput(mvdev->resuming_migf->filp);
		mvdev->resuming_migf = NULL;
	}
	if (mvdev->saving_migf) {
		mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
		cancel_work_sync(&mvdev->saving_migf->async_data.work);
		mlx5vf_disable_fd(mvdev->saving_migf);
		fput(mvdev->saving_migf->filp);
		mvdev->saving_migf = NULL;
	}
}

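/*
 * Execute a single arc of the migration state machine. Arcs that produce a
 * data transfer file return it to the caller; all other arcs return NULL on
 * success.
 */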
static struct file *
mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
				    u32 new)
{
	u32 cur = mvdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
		ret = mlx5vf_cmd_suspend_vhca(mvdev,
			MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
		ret = mlx5vf_cmd_resume_vhca(mvdev,
			MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
		ret = mlx5vf_cmd_suspend_vhca(mvdev,
			MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
		ret = mlx5vf_cmd_resume_vhca(mvdev,
			MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct mlx5_vf_migration_file *migf;

		migf = mlx5vf_pci_save_device_data(mvdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		mvdev->saving_migf = migf;
		return migf->filp;
	}

	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
		mlx5vf_disable_fds(mvdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct mlx5_vf_migration_file *migf;

		migf = mlx5vf_pci_resume_device_data(mvdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		mvdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = mlx5vf_cmd_load_vhca_state(mvdev,
						 mvdev->resuming_migf);
		if (ret)
			return ERR_PTR(ret);
		mlx5vf_disable_fds(mvdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if exists.
 */
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
{
again:
	spin_lock(&mvdev->reset_lock);
	if (mvdev->deferred_reset) {
		mvdev->deferred_reset = false;
		spin_unlock(&mvdev->reset_lock);
		mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		mlx5vf_disable_fds(mvdev);
		goto again;
	}
	mutex_unlock(&mvdev->state_mutex);
	spin_unlock(&mvdev->reset_lock);
}

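/*
 * Step from the current migration state to @new_state one arc at a time, as
 * selected by vfio_mig_get_next_state(). Only the final arc may return a data
 * transfer file.
 */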
static struct file *
mlx5vf_pci_set_device_state(struct vfio_device *vdev,
			    enum vfio_device_mig_state new_state)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&mvdev->state_mutex);
	while (new_state != mvdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(ret);
			break;
		}
		res = mlx5vf_pci_step_device_state_locked(mvdev, next_state);
		if (IS_ERR(res))
			break;
		mvdev->mig_state = next_state;
		if (WARN_ON(res && new_state != mvdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mlx5vf_state_mutex_unlock(mvdev);
	return res;
}

static int mlx5vf_pci_get_device_state(struct vfio_device *vdev,
				       enum vfio_device_mig_state *curr_state)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);

	mutex_lock(&mvdev->state_mutex);
	*curr_state = mvdev->mig_state;
	mlx5vf_state_mutex_unlock(mvdev);
	return 0;
}

static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);

	if (!mvdev->migrate_cap)
		return;

	/*
	 * As the higher VFIO layers are holding locks across reset and using
	 * those same locks with the mm_lock we need to prevent ABBA deadlock
	 * with the state_mutex and mm_lock.
	 * In case the state_mutex was taken already we defer the cleanup work
	 * to the unlock flow of the other running context.
	 */
	spin_lock(&mvdev->reset_lock);
	mvdev->deferred_reset = true;
	if (!mutex_trylock(&mvdev->state_mutex)) {
		spin_unlock(&mvdev->reset_lock);
		return;
	}
	spin_unlock(&mvdev->reset_lock);
	mlx5vf_state_mutex_unlock(mvdev);
}

static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &mvdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (mvdev->migrate_cap)
		mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);

	mlx5vf_cmd_close_migratable(mvdev);
	vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
	.migration_set_state = mlx5vf_pci_set_device_state,
	.migration_get_state = mlx5vf_pci_get_device_state,
};

static const struct vfio_device_ops mlx5vf_pci_ops = {
	.name = "mlx5-vfio-pci",
	.open_device = mlx5vf_pci_open_device,
	.close_device = mlx5vf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
};

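/*
 * Bind to a mlx5 VF: initialize the vfio-pci core device, mark it migration
 * capable when the function supports it, and register it with the VFIO core.
 */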
static int mlx5vf_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct mlx5vf_pci_core_device *mvdev;
	int ret;

	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
	if (!mvdev)
		return -ENOMEM;
	vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
	mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
	dev_set_drvdata(&pdev->dev, &mvdev->core_device);
	ret = vfio_pci_core_register_device(&mvdev->core_device);
	if (ret)
		goto out_free;
	return 0;

out_free:
	mlx5vf_cmd_remove_migratable(mvdev);
	vfio_pci_core_uninit_device(&mvdev->core_device);
	kfree(mvdev);
	return ret;
}

static void mlx5vf_pci_remove(struct pci_dev *pdev)
{
	struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);

	vfio_pci_core_unregister_device(&mvdev->core_device);
	mlx5vf_cmd_remove_migratable(mvdev);
	vfio_pci_core_uninit_device(&mvdev->core_device);
	kfree(mvdev);
}

static const struct pci_device_id mlx5vf_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_MELLANOX, 0x101e) },
	{}
};

MODULE_DEVICE_TABLE(pci, mlx5vf_pci_table);

static const struct pci_error_handlers mlx5vf_err_handlers = {
	.reset_done = mlx5vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver mlx5vf_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mlx5vf_pci_table,
	.probe = mlx5vf_pci_probe,
	.remove = mlx5vf_pci_remove,
	.err_handler = &mlx5vf_err_handlers,
	.driver_managed_dma = true,
};

static void __exit mlx5vf_pci_cleanup(void)
{
	pci_unregister_driver(&mlx5vf_pci_driver);
}

static int __init mlx5vf_pci_init(void)
{
	return pci_register_driver(&mlx5vf_pci_driver);
}

module_init(mlx5vf_pci_init);
module_exit(mlx5vf_pci_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Max Gurtovoy <mgurtovoy@nvidia.com>");
MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
MODULE_DESCRIPTION(
	"MLX5 VFIO PCI - User Level meta-driver for MLX5 device family");