// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
#include <uapi/linux/dma-heap.h>

#define DEVNAME "dma_heap"

#define NUM_HEAP_MINORS 128

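/*
 * Each registered heap gets its own char device minor, handed out from
 * the dma_heap_minors xarray below, so NUM_HEAP_MINORS bounds how many
 * heaps can coexist.
 */
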
/**
 * struct dma_heap - represents a dmabuf heap in the system
 * @name:		used for debugging/device-node name
 * @ops:		ops struct for this heap
 * @priv:		private data, handed back by dma_heap_get_drvdata()
 * @heap_devt:		heap device node
 * @list:		list head connecting to list of heaps
 * @heap_cdev:		heap char device
 *
 * Represents a heap of memory from which buffers can be made.
 */
struct dma_heap {
	const char *name;
	const struct dma_heap_ops *ops;
	void *priv;
	dev_t heap_devt;
	struct list_head list;
	struct cdev heap_cdev;
};

static LIST_HEAD(heap_list);			/* list of registered heaps */
static DEFINE_MUTEX(heap_list_lock);		/* protects heap_list */
static dev_t dma_heap_devt;			/* base dev_t for heap char devices */
static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);	/* minor number -> struct dma_heap */

static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
				 unsigned int fd_flags,
				 unsigned int heap_flags)
{
	struct dma_buf *dmabuf;
	int fd;

	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
	}
	return fd;
}

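/*
 * On open, resolve the chardev minor back to its heap and stash the
 * heap pointer for later ioctls; heap fds are not seekable.
 */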
static int dma_heap_open(struct inode *inode, struct file *file)
{
	struct dma_heap *heap;

	heap = xa_load(&dma_heap_minors, iminor(inode));
	if (!heap) {
		pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
		return -ENODEV;
	}

	/* instance data as context */
	file->private_data = heap;
	nonseekable_open(inode, file);

	return 0;
}

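/*
 * Backend for DMA_HEAP_IOCTL_ALLOC: reject requests with a non-zero fd
 * on input or with flags outside the UAPI-defined masks, then hand the
 * new dmabuf fd back through the same structure.
 */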
static long dma_heap_ioctl_allocate(struct file *file, void *data)
{
	struct dma_heap_allocation_data *heap_allocation = data;
	struct dma_heap *heap = file->private_data;
	int fd;

	if (heap_allocation->fd)
		return -EINVAL;

	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
		return -EINVAL;

	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
		return -EINVAL;

	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
				   heap_allocation->fd_flags,
				   heap_allocation->heap_flags);
	if (fd < 0)
		return fd;

	heap_allocation->fd = fd;

	return 0;
}

static unsigned int dma_heap_ioctl_cmds[] = {
	DMA_HEAP_IOCTL_ALLOC,
};

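/*
 * Generic ioctl dispatcher, similar in spirit to DRM's drm_ioctl(): the
 * argument is copied in using the size encoded in the userspace cmd,
 * any extra bytes in the kernel's view of the struct are zeroed, and
 * the result is copied back out with the userspace size. This keeps
 * old binaries working if a UAPI struct grows in a newer kernel.
 */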
static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
			   unsigned long arg)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	unsigned int kcmd;
	unsigned int in_size, out_size, drv_size, ksize;
	int nr = _IOC_NR(ucmd);
	int ret = 0;

	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
		return -EINVAL;

	nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));

	/* Get the kernel ioctl cmd that matches */
	kcmd = dma_heap_ioctl_cmds[nr];

	/* Figure out the delta between user cmd size and kernel cmd size */
	drv_size = _IOC_SIZE(kcmd);
	out_size = _IOC_SIZE(ucmd);
	in_size = out_size;
	if ((ucmd & kcmd & IOC_IN) == 0)
		in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		out_size = 0;
	ksize = max(max(in_size, out_size), drv_size);

	/* If necessary, allocate buffer for ioctl argument */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
		ret = -EFAULT;
		goto err;
	}

	/* zero out any difference between the kernel/user structure size */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	switch (kcmd) {
	case DMA_HEAP_IOCTL_ALLOC:
		ret = dma_heap_ioctl_allocate(file, kdata);
		break;
	default:
		ret = -ENOTTY;
		goto err;
	}

	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
		ret = -EFAULT;
err:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

static const struct file_operations dma_heap_fops = {
	.owner = THIS_MODULE,
	.open = dma_heap_open,
	.unlocked_ioctl = dma_heap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = dma_heap_ioctl,
#endif
};

/**
 * dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
	return heap->priv;
}

/**
 * dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name of
 *
 * Returns:
 * The char* for the heap name.
 */
const char *dma_heap_get_name(struct dma_heap *heap)
{
	return heap->name;
}

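/**
 * dma_heap_add - adds a heap to dmabuf heaps
 * @exp_info:		information needed to register this heap
 */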
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
	struct dma_heap *heap, *h, *err_ret;
	struct device *dev_ret;
	unsigned int minor;
	int ret;

	if (!exp_info->name || !strcmp(exp_info->name, "")) {
		pr_err("dma_heap: Cannot add heap without a name\n");
		return ERR_PTR(-EINVAL);
	}

	if (!exp_info->ops || !exp_info->ops->allocate) {
		pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
		return ERR_PTR(-EINVAL);
	}

	/* check the name is unique */
	mutex_lock(&heap_list_lock);
	list_for_each_entry(h, &heap_list, list) {
		if (!strcmp(h->name, exp_info->name)) {
			mutex_unlock(&heap_list_lock);
			pr_err("dma_heap: Already registered heap named %s\n",
			       exp_info->name);
			return ERR_PTR(-EINVAL);
		}
	}
	mutex_unlock(&heap_list_lock);

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->name = exp_info->name;
	heap->ops = exp_info->ops;
	heap->priv = exp_info->priv;

	/* Find unused minor number */
	ret = xa_alloc(&dma_heap_minors, &minor, heap,
		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("dma_heap: Unable to get minor number for heap\n");
		err_ret = ERR_PTR(ret);
		goto err0;
	}

	/* Create device */
	heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);

	cdev_init(&heap->heap_cdev, &dma_heap_fops);
	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
	if (ret < 0) {
		pr_err("dma_heap: Unable to add char device\n");
		err_ret = ERR_PTR(ret);
		goto err1;
	}

	dev_ret = device_create(dma_heap_class,
				NULL,
				heap->heap_devt,
				NULL,
				heap->name);
	if (IS_ERR(dev_ret)) {
		pr_err("dma_heap: Unable to create device\n");
		err_ret = ERR_CAST(dev_ret);
		goto err2;
	}

	/* Add heap to the list */
	mutex_lock(&heap_list_lock);
	list_add(&heap->list, &heap_list);
	mutex_unlock(&heap_list_lock);

	return heap;

err2:
	cdev_del(&heap->heap_cdev);
err1:
	xa_erase(&dma_heap_minors, minor);
err0:
	kfree(heap);
	return err_ret;
}

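/*
 * Usage sketch (illustrative only, not part of this file): a heap
 * driver registers itself at init time. "example_heap_ops" and the
 * "example" name are hypothetical; only the dma_heap_* API is real.
 *
 *	static struct dma_heap_export_info exp_info = {
 *		.name = "example",
 *		.ops = &example_heap_ops,
 *		.priv = NULL,
 *	};
 *	struct dma_heap *heap = dma_heap_add(&exp_info);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */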
/* Place heap device nodes under /dev/dma_heap/<heap name> */
static char *dma_heap_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}

static int dma_heap_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
	if (ret)
		return ret;

	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
	if (IS_ERR(dma_heap_class)) {
		unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
		return PTR_ERR(dma_heap_class);
	}
	dma_heap_class->devnode = dma_heap_devnode;

	return 0;
}
/* Run early so heap drivers can register from their own initcalls */
subsys_initcall(dma_heap_init);
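
/*
 * Userspace usage sketch (illustrative only): open a heap's device node
 * and allocate via DMA_HEAP_IOCTL_ALLOC. The "system" heap name here is
 * an assumption; substitute any registered heap.
 *
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *
 *	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0)
 *		data.fd now holds the new dmabuf fd
 */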