// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 */

#define dev_fmt(fmt)	"VFIO: " fmt

#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

#define DRIVER_VERSION	"0.10"
#define DRIVER_AUTHOR	"Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC	"VFIO platform base module"

#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)

static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);

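/*
 * Walk the list of registered reset handlers and return the one whose
 * compatible string matches @compat, taking a reference on the owning
 * module.  The caller must drop that reference with module_put() once
 * the reset handler is no longer needed.
 */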
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
							    struct module **module)
{
	struct vfio_platform_reset_node *iter;
	vfio_platform_reset_fn_t reset_fn = NULL;

	mutex_lock(&driver_lock);
	list_for_each_entry(iter, &reset_list, link) {
		if (!strcmp(iter->compat, compat) &&
		    try_module_get(iter->owner)) {
			*module = iter->owner;
			reset_fn = iter->of_reset;
			break;
		}
	}
	mutex_unlock(&driver_lock);
	return reset_fn;
}

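/*
 * Probe the ACPI companion of @dev and cache its HID.  ACPI-probed
 * devices are later reset through the _RST method.  Returns -ENOENT
 * when ACPI is disabled, letting the caller fall back to OF probing.
 */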
static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return -ENOENT;

	adev = ACPI_COMPANION(dev);
	if (!adev) {
		dev_err(dev, "ACPI companion device not found for %s\n",
			vdev->name);
		return -ENODEV;
	}

#ifdef CONFIG_ACPI
	vdev->acpihid = acpi_device_hid(adev);
#endif
	return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}

static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
					 const char **extra_dbg)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = vdev->device;
	acpi_handle handle = ACPI_HANDLE(dev);
	acpi_status acpi_ret;

	acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
	if (ACPI_FAILURE(acpi_ret)) {
		if (extra_dbg)
			*extra_dbg = acpi_format_exception(acpi_ret);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOENT;
#endif
}

static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
{
#ifdef CONFIG_ACPI
	struct device *dev = vdev->device;
	acpi_handle handle = ACPI_HANDLE(dev);

	return acpi_has_method(handle, "_RST");
#else
	return false;
#endif
}

static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
{
	if (VFIO_PLATFORM_IS_ACPI(vdev))
		return vfio_platform_acpi_has_reset(vdev);

	return vdev->of_reset ? true : false;
}

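/*
 * Acquire a reset handler for the device.  For ACPI devices this only
 * checks for a _RST method; for OF devices the registered handler list
 * is searched and, if nothing matches, the corresponding
 * "vfio-reset:<compat>" module is requested before searching again.
 */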
static int vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
	if (VFIO_PLATFORM_IS_ACPI(vdev))
		return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT;

	vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
						    &vdev->reset_module);
	if (!vdev->of_reset) {
		request_module("vfio-reset:%s", vdev->compat);
		vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
							    &vdev->reset_module);
	}

	return vdev->of_reset ? 0 : -ENOENT;
}

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (VFIO_PLATFORM_IS_ACPI(vdev))
		return;

	if (vdev->of_reset)
		module_put(vdev->reset_module);
}

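/*
 * Enumerate the device resources and describe each one as a VFIO region.
 * MMIO resources are readable (and writable unless marked read-only) and
 * may only be mmapped when both their address and size are page aligned.
 */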
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
			    !(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;

			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

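/*
 * Reset the device, either through the ACPI _RST method or through a
 * registered OF reset handler.  Fails with -EINVAL when neither is
 * available.
 */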
static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
				    const char **extra_dbg)
{
	if (VFIO_PLATFORM_IS_ACPI(vdev)) {
		dev_info(vdev->device, "reset\n");
		return vfio_platform_acpi_call_reset(vdev, extra_dbg);
	} else if (vdev->of_reset) {
		dev_info(vdev->device, "reset\n");
		return vdev->of_reset(vdev);
	}

	dev_warn(vdev->device, "no reset function found!\n");
	return -EINVAL;
}

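/*
 * Release path: reset the device (warning if a required reset fails),
 * drop the runtime PM reference and tear down regions and interrupts.
 */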
static void vfio_platform_close_device(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	const char *extra_dbg = NULL;
	int ret;

	ret = vfio_platform_call_reset(vdev, &extra_dbg);
	if (WARN_ON(ret && vdev->reset_required)) {
		dev_warn(
			vdev->device,
			"reset driver is required and reset call failed in release (%d) %s\n",
			ret, extra_dbg ? extra_dbg : "");
	}
	pm_runtime_put(vdev->device);
	vfio_platform_regions_cleanup(vdev);
	vfio_platform_irq_cleanup(vdev);
}

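/*
 * First-open path: set up regions and interrupts, resume the device via
 * runtime PM and reset it.  A failed reset is fatal only when the reset
 * is required.
 */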
static int vfio_platform_open_device(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	const char *extra_dbg = NULL;
	int ret;

	ret = vfio_platform_regions_init(vdev);
	if (ret)
		return ret;

	ret = vfio_platform_irq_init(vdev);
	if (ret)
		goto err_irq;

	ret = pm_runtime_get_sync(vdev->device);
	if (ret < 0)
		goto err_rst;

	ret = vfio_platform_call_reset(vdev, &extra_dbg);
	if (ret && vdev->reset_required) {
		dev_warn(
			vdev->device,
			"reset driver is required and reset call failed in open (%d) %s\n",
			ret, extra_dbg ? extra_dbg : "");
		goto err_rst;
	}
	return 0;

err_rst:
	pm_runtime_put(vdev->device);
	vfio_platform_irq_cleanup(vdev);
err_irq:
	vfio_platform_regions_cleanup(vdev);
	return ret;
}

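/*
 * Handle the VFIO device ioctls: device, region and IRQ info queries,
 * IRQ configuration (VFIO_DEVICE_SET_IRQS) and VFIO_DEVICE_RESET.
 */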
static long vfio_platform_ioctl(struct vfio_device *core_vdev,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);

	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vfio_platform_has_reset(vdev))
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs,
							 vdev->num_irqs, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vfio_platform_call_reset(vdev, NULL);
	}

	return -ENOTTY;
}

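/*
 * Read an MMIO region on behalf of userspace, mapping it lazily on first
 * access and splitting the transfer into naturally aligned 4/2/1 byte
 * accesses.
 */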
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_read(struct vfio_device *core_vdev,
				  char __user *buf, size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
					       buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL;

	return -EINVAL;
}

static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL;

	return -EINVAL;
}

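/*
 * Back an mmap() of an MMIO region with the physical region, rejecting
 * requests that fall outside of it.  The mapping is made non-cached.
 */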
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL;

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open_device	= vfio_platform_open_device,
	.close_device	= vfio_platform_close_device,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

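/*
 * Device tree probe: all that is needed is the "compatible" string,
 * which is later used to match a registered reset handler.
 */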
static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
				  struct device *dev)
{
	int ret;

	ret = device_property_read_string(dev, "compatible",
					  &vdev->compat);
	if (ret)
		dev_err(dev, "Cannot retrieve compat for %s\n", vdev->name);

	return ret;
}

/*
 * There can be two kernel build combinations. One build where
 * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. DT user will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI is compiled in but the system is
 * booting with any of these combinations.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
 * terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
 * valid checks. We cannot claim that this system is DT.
 */
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	int ret;

	vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops);

	ret = vfio_platform_acpi_probe(vdev, dev);
	if (ret)
		ret = vfio_platform_of_probe(vdev, dev);

	if (ret)
		goto out_uninit;

	vdev->device = dev;

	ret = vfio_platform_get_reset(vdev);
	if (ret && vdev->reset_required) {
		dev_err(dev, "No reset function found for device %s\n",
			vdev->name);
		goto out_uninit;
	}

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto put_reset;

	mutex_init(&vdev->igate);

	pm_runtime_enable(dev);
	return 0;

put_reset:
	vfio_platform_put_reset(vdev);
out_uninit:
	vfio_uninit_group_dev(&vdev->vdev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

void vfio_platform_remove_common(struct vfio_platform_device *vdev)
{
	vfio_unregister_group_dev(&vdev->vdev);

	pm_runtime_disable(vdev->device);
	vfio_platform_put_reset(vdev);
	vfio_uninit_group_dev(&vdev->vdev);
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);

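/*
 * Registration API used by the vfio-platform reset modules to add and
 * remove their reset handlers from the global list.
 */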
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);

void vfio_platform_unregister_reset(const char *compat,
				    vfio_platform_reset_fn_t fn)
{
	struct vfio_platform_reset_node *iter, *temp;

	mutex_lock(&driver_lock);
	list_for_each_entry_safe(iter, temp, &reset_list, link) {
		if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
			list_del(&iter->link);
			break;
		}
	}
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);