// SPDX-License-Identifier: GPL-2.0-only
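/*
 * uacce (Unified/User-space-access-intended Accelerator Framework) exposes
 * hardware accelerator queues to user space as character devices. Each
 * open() of a uacce device creates one queue; with UACCE_DEV_SVA the queue
 * shares the process address space through IOMMU SVA, so user-space
 * pointers can be handed directly to the device.
 */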
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);

/*
 * If the parent driver or the device disappears, the queue state is invalid
 * and ops are not usable anymore.
 */
static bool uacce_queue_is_valid(struct uacce_queue *q)
{
	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
}

static int uacce_start_queue(struct uacce_queue *q)
{
	int ret;

	if (q->state != UACCE_Q_INIT)
		return -EINVAL;

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			return ret;
	}

	q->state = UACCE_Q_STARTED;
	return 0;
}

static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	    uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;

	return 0;
}

static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	long ret = -ENXIO;

	/*
	 * Take the device-wide uacce->mutex rather than q->mutex here:
	 * uacce->ops->ioctl() may fault on user memory and acquire mmap_lock,
	 * while uacce_fops_mmap() runs with mmap_lock held and takes
	 * q->mutex. Holding q->mutex around ->ioctl() could therefore
	 * deadlock against mmap().
	 */
	mutex_lock(&uacce->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		ret = uacce_start_queue(q);
		break;
	case UACCE_CMD_PUT_Q:
		ret = uacce_put_queue(q);
		break;
	default:
		if (uacce->ops->ioctl)
			ret = uacce->ops->ioctl(q, cmd, arg);
		else
			ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&uacce->mutex);
	return ret;
}

#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				    unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif

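/*
 * With UACCE_DEV_SVA the queue shares the process address space with the
 * device: bind current->mm to the parent device and record the PASID that
 * identifies this address space to the hardware.
 */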
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}

static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;
	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}

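/*
 * Each open() of the character device creates one queue: look up the uacce
 * device by minor number, bind the process address space if SVA is enabled,
 * then ask the parent driver for a hardware queue.
 */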
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	mutex_lock(&uacce->mutex);

	if (!uacce->parent) {
		ret = -EINVAL;
		goto out_with_mem;
	}

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;
	mutex_init(&q->mutex);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->mutex);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	mutex_unlock(&uacce->mutex);
	return ret;
}

static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
}

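/*
 * munmap() of a region frees its bookkeeping. The qfr slot is cleared under
 * q->mutex so that the same region can be mapped again later and so that
 * uacce_fops_mmap() never sees a stale pointer.
 */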
static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr;

	if (vma->vm_pgoff >= UACCE_MAX_REGION)
		return;

	/* Clear the slot before freeing, to avoid leaving a dangling pointer. */
	mutex_lock(&q->mutex);
	qfr = q->qfrs[vma->vm_pgoff];
	q->qfrs[vma->vm_pgoff] = NULL;
	mutex_unlock(&q->mutex);
	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};

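/*
 * The vma page offset selects which queue file region to map: one page
 * offset per region type (MMIO registers or device user share memory).
 * The parent driver's mmap() callback performs the actual remapping.
 */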
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q)) {
		ret = -ENXIO;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;

	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&q->mutex);

	return ret;

out_with_lock:
	mutex_unlock(&q->mutex);
	kfree(qfr);
	return ret;
}

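/*
 * Poll support is delegated to the parent driver: is_q_updated() reports
 * whether the device has produced new results on this queue.
 */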
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;
	__poll_t ret = 0;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	poll_wait(file, &q->wait, wait);

	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		ret = EPOLLIN | EPOLLRDNORM;

out_unlock:
	mutex_unlock(&q->mutex);
	return ret;
}

static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};

#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n",
			  uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
			  uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
			  uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);

static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	NULL,
};

static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	     (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	     (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	return attr->mode;
}

static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);

static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}

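/*
 * Negotiate SVA support with the IOMMU: enable the IOPF (I/O page fault)
 * and SVA features on the parent device if possible, and clear
 * UACCE_DEV_SVA from the returned flags when either is unavailable, so
 * callers can see the negotiated result in uacce->flags.
 */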
static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
{
	int ret;

	if (!(flags & UACCE_DEV_SVA))
		return flags;

	flags &= ~UACCE_DEV_SVA;

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
	if (ret) {
		dev_err(parent, "failed to enable IOPF feature! ret = %pe\n",
			ERR_PTR(ret));
		return flags;
	}

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
	if (ret) {
		dev_err(parent, "failed to enable SVA feature! ret = %pe\n",
			ERR_PTR(ret));
		iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
		return flags;
	}

	return flags | UACCE_DEV_SVA;
}

static void uacce_disable_sva(struct uacce_device *uacce)
{
	if (!(uacce->flags & UACCE_DEV_SVA))
		return;

	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
}

/**
 * uacce_alloc() - alloc an accelerator
 * @parent: pointer of uacce parent device
 * @interface: pointer of uacce_interface for register
 *
 * Return: a uacce device pointer on success, or an ERR_PTR on failure.
 * Callers must check the negotiated uacce->flags, since UACCE_DEV_SVA may
 * have been cleared if SVA is not available.
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	flags = uacce_enable_sva(parent, flags);

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->mutex);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	uacce_disable_sva(uacce);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);

/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: the initialized uacce device
 *
 * Return: 0 on success, or a negative errno.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);

/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;

	/*
	 * Unmap any remaining user-space mappings, so that user space can no
	 * longer touch the mmapped areas once the parent device is gone.
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/*
	 * uacce_fops_open() may be running concurrently, even after the cdev
	 * is removed. Holding uacce->mutex ensures that open() does not
	 * obtain a removed uacce device.
	 */
	mutex_lock(&uacce->mutex);

	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
		 */
		mutex_lock(&q->mutex);
		uacce_put_queue(q);
		mutex_unlock(&q->mutex);
		uacce_unbind_queue(q);
	}

	/* disable SVA now that no queue is open */
	uacce_disable_sva(uacce);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);

	/*
	 * uacce exists as long as there are open fds, but ops will be freed
	 * now. Ensure that bugs cause a NULL dereference rather than a
	 * use-after-free.
	 */
	uacce->ops = NULL;
	uacce->parent = NULL;
	mutex_unlock(&uacce->mutex);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);

static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(THIS_MODULE, UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static void __exit uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");