// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/qcom_scm.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_DSP_UTILITIES_HANDLE 2
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
#define ADSP_MMAP_ADD_PAGES 0x1000
#define DSP_UNSUPPORTED_API (0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) +   \
				   REMOTE_SCALARS_OUTBUFS(sc) +  \
				   REMOTE_SCALARS_INHANDLES(sc) + \
				   REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
				((((attr) & 0x07) << 29) |	\
				(((method) & 0x1f) << 24) |	\
				(((in) & 0xff) << 16) |		\
				(((out) & 0xff) << 8) |		\
				(((oin) & 0x0f) << 4) |		\
				((oout) & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
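
/*
 * Worked example (editor's illustration, not part of the driver): for a
 * remote method 3 taking two input buffers and one output buffer,
 *
 *	u32 sc = FASTRPC_SCALARS(3, 2, 1);	// == 0x03020100
 *
 * packs method 3 into bits 28..24, the two inbufs into bits 23..16 and the
 * single outbuf into bits 15..8. The decode macros invert this:
 * REMOTE_SCALARS_INBUFS(sc) == 2, REMOTE_SCALARS_OUTBUFS(sc) == 1 and
 * REMOTE_SCALARS_LENGTH(sc) == 3.
 */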

#define FASTRPC_CREATE_PROCESS_NARGS 6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
#define FASTRPC_RMID_INIT_MMAP 4
#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
#define FASTRPC_RMID_INIT_MEM_MAP 10
#define FASTRPC_RMID_INIT_MEM_UNMAP 11

/* Protection Domain (PD) ids */
#define AUDIO_PD (0)
#define USER_PD (1)
#define SENSORS_PD (2)

#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp" };

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_dmahandle {
	s32 fd;			/* dma handle fd */
	u32 offset;		/* dma handle offset */
	u32 len;		/* dma handle length */
};

struct fastrpc_remote_buf {
	u64 pv;			/* buffer pointer */
	u64 len;		/* length of buffer */
};

union fastrpc_remote_arg {
	struct fastrpc_remote_buf buf;
	struct fastrpc_remote_dmahandle dma;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_mem_map_req_msg {
	s32 pgid;
	s32 fd;
	s32 offset;
	u32 flags;
	u64 vaddrin;
	s32 num;
	s32 data_len;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_mem_unmap_req_msg {
	s32 pgid;
	s32 fd;
	u64 vaddrin;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};
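
/*
 * Editor's sketch (illustrative values only): a user invocation of method 2
 * with one input and one output buffer leaves fastrpc_invoke_send() below
 * roughly as
 *
 *	struct fastrpc_msg msg = {
 *		.pid	= fl->tgid,
 *		.tid	= current->pid,
 *		.ctx	= ctx->ctxid | fl->pd,	// low bits carry the PD id
 *		.handle	= handle,
 *		.sc	= FASTRPC_SCALARS(2, 1, 1),
 *		.addr	= ctx->buf->phys,	// metadata + inline args
 *		.size	= roundup(ctx->msg_sz, PAGE_SIZE),
 *	};
 *
 * before being handed to rpmsg_send().
 */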

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmap support */
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	u64 raddr;
	u32 attr;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	int vmcount;
	u32 perms;
	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct kref refcount;
	/* Flag if dsp attributes are cached */
	bool valid_attributes;
	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
	struct fastrpc_device *secure_fdevice;
	struct fastrpc_device *fdevice;
	bool secure;
	bool unsigned_support;
};

struct fastrpc_device {
	struct fastrpc_channel_ctx *cctx;
	struct miscdevice miscdev;
	bool secure;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	bool is_secure_dev;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
			struct qcom_scm_vmperm perm;
			int err = 0;

			perm.vmid = QCOM_SCM_VMID_HLOS;
			perm.perm = QCOM_SCM_PERM_RWX;
			err = qcom_scm_assign_mem(map->phys, map->size,
				&(map->fl->cctx->vmperms[0].vmid), &perm, 1);
			if (err) {
				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
					map->phys, map->size, err);
				return;
			}
		}
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
			      struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	int ret = fastrpc_map_lookup(fl, fd, ppmap);

	if (!ret)
		fastrpc_map_get(*ppmap);

	return ret;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}
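
/*
 * Addressing note with an illustrative value (editor's sketch): the DMA
 * address returned by dma_alloc_coherent() fits in 32 bits, and the session
 * id is packed into bits 63..32 of buf->phys so the DSP side can route the
 * access through the right SMMU context bank. FASTRPC_PHYS() strips it again:
 *
 *	u64 phys = 0x8e000000 | ((u64)4 << 32);	// assumed dma addr, sid 4
 *	FASTRPC_PHYS(phys);			// 0x8e000000 for the DMA API
 */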

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nbufs; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
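
/*
 * Worked example (editor's illustration): two input buffers
 * A = [0x1000, 0x1800) and B = [0x1400, 0x2000), sorted by start address.
 * A keeps mstart = 0x1000, mend = 0x1800, offset = 0. B starts below A's
 * max_end, so only its non-overlapping tail is materialized:
 * mstart = 0x1800, mend = 0x2000, offset = 0x400. The payload then carries
 * 0x800 bytes for A plus 0x800 bytes for B instead of B's full 0xc00.
 */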

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_free() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}
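
/*
 * Editor's note with an illustrative value: idr_alloc_cyclic() above hands
 * back a small id in [1, FASTRPC_CTX_MAX), say 5, so ctx->ctxid becomes
 * 5 << 4 = 0x50. The low four bits stay free for the PD id that
 * fastrpc_invoke_send() ORs in, and FASTRPC_CTXID_MASK (0xFF0) recovers the
 * id from a response: (0x50 & 0xFF0) >> 4 == 5, matching the idr_remove()
 * and idr_find() shifts elsewhere in this file.
 */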

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	iosys_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, u32 attr, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	if (attr & FASTRPC_ATTR_SECUREMAP) {
		/*
		 * If subsystem vmids are defined in DTSI, then do
		 * hyp_assign from HLOS to those VM(s)
		 */
		unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);

		map->attr = attr;
		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
					  fl->cctx->vmperms, fl->cctx->vmcount);
		if (err) {
			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
				map->phys, map->size, err);
			goto map_err;
		}
	}
	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_buf) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}
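
/*
 * Worked example (editor's illustration): for two scalars the metadata is
 * 2 * (16 + 8 + 16) = 80 bytes of per-argument records, plus 16 * 8 = 128
 * bytes of fdlist and 64 * 4 = 256 bytes of crclist, i.e. 464 bytes in
 * total, which fastrpc_get_payload_size() below rounds up to the 128-byte
 * FASTRPC_ALIGN boundary (512) before the inline arguments begin.
 */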

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int oix;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (oix = 0; oix < ctx->nbufs; oix++) {
		int i = ctx->olaps[oix].raix;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[oix].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, ctx->args[i].attr,
					 &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}

static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
{
	return (struct fastrpc_invoke_buf *)(&pra[len]);
}

static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
{
	return (struct fastrpc_phy_page *)(&buf[len]);
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].buf.pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].buf.pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		if (ctx->maps[i]) {
			pages[i].addr = ctx->maps[i]->phys;
			pages[i].size = ctx->maps[i]->size;
		}
		rpra[i].dma.fd = ctx->args[i].fd;
		rpra[i].dma.len = ctx->args[i].length;
		rpra[i].dma.offset = (u64) ctx->args[i].ptr;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	union fastrpc_remote_arg *rpra = ctx->rpra;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_map *mmap = NULL;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	u64 *fdlist;
	int i, inbufs, outbufs, handles;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		if (!ctx->maps[i]) {
			void *src = (void *)(uintptr_t)rpra[i].buf.pv;
			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
			u64 len = rpra[i].buf.len;

			if (!kernel) {
				if (copy_to_user((void __user *)dst, src, len))
					return -EFAULT;
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
		if (!fdlist[i])
			break;
		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
			fastrpc_map_put(mmap);
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
	/* Check if the device node is non-secure and channel is secure */
	if (!fl->is_secure_dev && fl->cctx->secure) {
		/*
		 * Allow untrusted applications to offload only to Unsigned PD when
		 * channel is configured as secure and block untrusted apps on channel
		 * that does not support unsigned PD or app tries to offload to signed PD
		 */
		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
			return true;
		}
	}

	return false;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;
	bool unsigned_module = false;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
		unsigned_module = true;

	if (is_session_rejected(fl, unsigned_module)) {
		err = -ECONNREFUSED;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_device *fdevice;
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fdevice = miscdev_to_fdevice(filp->private_data);
	cctx = fdevice->cctx;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;
	fl->is_secure_dev = fdevice->secure;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf is no longer valid for
		 * the process. Therefore "leak" the fd and rely on the
		 * process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}
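
/*
 * Userspace usage sketch (illustrative, error handling omitted):
 *
 *	struct fastrpc_alloc_dma_buf bp = { .size = 4096 };
 *
 *	ioctl(fd, FASTRPC_IOCTL_ALLOC_DMA_BUFF, &bp);
 *	void *p = mmap(NULL, bp.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, bp.fd, 0);
 *
 * The returned bp.fd is an ordinary dma-buf fd backed by the coherent
 * allocation above; it can also be passed as an argument fd to
 * FASTRPC_IOCTL_INVOKE or FASTRPC_IOCTL_MEM_MAP.
 */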

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
				     uint32_t dsp_attr_buf_len)
{
	struct fastrpc_invoke_args args[2] = { 0 };

	/* Capability filled in userspace */
	dsp_attr_buf[0] = 0;

	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
	args[0].length = sizeof(dsp_attr_buf_len);
	args[0].fd = -1;
	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
	args[1].length = dsp_attr_buf_len;
	args[1].fd = -1;
	fl->pd = USER_PD;

	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
				       FASTRPC_SCALARS(0, 1, 1), args);
}

static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
					struct fastrpc_user *fl)
{
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	uint32_t attribute_id = cap->attribute_id;
	uint32_t *dsp_attributes;
	unsigned long flags;
	uint32_t domain = cap->domain;
	int err;

	spin_lock_irqsave(&cctx->lock, flags);
	/* check if we already have queried dsp for attributes */
	if (cctx->valid_attributes) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
	if (!dsp_attributes)
		return -ENOMEM;

	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	if (err == DSP_UNSUPPORTED_API) {
		dev_info(&cctx->rpdev->dev,
			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
		kfree(dsp_attributes);
		return -EOPNOTSUPP;
	} else if (err) {
		dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
		kfree(dsp_attributes);
		return err;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	cctx->valid_attributes = true;
	spin_unlock_irqrestore(&cctx->lock, flags);
	kfree(dsp_attributes);
done:
	cap->capability = cctx->dsp_attributes[attribute_id];
	return 0;
}

static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_ioctl_capability cap = {0};
	int err = 0;

	if (copy_from_user(&cap, argp, sizeof(cap)))
		return -EFAULT;

	cap.capability = 0;
	if (cap.domain >= FASTRPC_DEV_MAX) {
		dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
			cap.domain, err);
		return -ECHRNG;
	}

	/* Fastrpc capabilities does not support modem domain */
	if (cap.domain == MDSP_DOMAIN_ID) {
		dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
		return -ECHRNG;
	}

	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
			cap.attribute_id, err);
		return -EOVERFLOW;
	}

	err = fastrpc_get_info_from_kernel(&cap, fl);
	if (err)
		return err;

	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
		return -EFAULT;

	return 0;
}

static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
				   struct fastrpc_req_munmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_buf *buf = NULL, *iter, *b;
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
		if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
			buf = iter;
			break;
		}
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_req_munmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_munmap_impl(fl, &req);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_req_munmap req_unmap;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);
		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddrout = buf->raddr;
		req_unmap.size = buf->size;
		fastrpc_req_munmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_invoke:
	fastrpc_buf_free(buf);

	return err;
}

static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_map *map = NULL, *iter, *m;
	struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
	int err = 0;
	u32 sc;
	struct device *dev = fl->sctx->dev;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, m, &fl->maps, node) {
		if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
			map = iter;
			break;
		}
	}

	spin_unlock(&fl->lock);

	if (!map) {
		dev_err(dev, "map not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.len = map->len;
	req_msg.vaddrin = map->raddr;
	req_msg.fd = map->fd;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	/* log before dropping the reference: the put may free the map */
	if (err)
		dev_err(dev, "unmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
	fastrpc_map_put(map);

	return err;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_mem_unmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_mem_unmap_impl(fl, &req);
}

static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
	struct fastrpc_mem_map_req_msg req_msg = { 0 };
	struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
	struct fastrpc_mem_unmap req_unmap = { 0 };
	struct fastrpc_phy_page pages = { 0 };
	struct fastrpc_mem_map req;
	struct device *dev = fl->sctx->dev;
	struct fastrpc_map *map = NULL;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	/* create SMMU mapping */
	err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
	if (err) {
		dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.fd = req.fd;
	req_msg.offset = req.offset;
	req_msg.vaddrin = req.vaddrin;
	map->va = (void *) (uintptr_t) req.vaddrin;
	req_msg.flags = req.flags;
	req_msg.num = sizeof(pages);
	req_msg.data_len = 0;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = map->phys;
	pages.size = map->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &pages;
	args[2].length = 0;

	args[3].ptr = (u64) (uintptr_t) &rsp_msg;
	args[3].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
	if (err) {
		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
			req.fd, req.vaddrin, map->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	map->raddr = rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
		req_unmap.length = map->size;
		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	return 0;

err_invoke:
	fastrpc_map_put(map);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, AUDIO_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_MAP:
		err = fastrpc_req_mem_map(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_UNMAP:
		err = fastrpc_req_mem_unmap(fl, argp);
		break;
	case FASTRPC_IOCTL_GET_DSP_INFO:
		err = fastrpc_get_dsp_info(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};
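
/*
 * Minimal userspace sketch of this ioctl surface (illustrative, assumes the
 * adsp node and omits error handling; "handle" would come from the usual
 * listener/module setup done by the userspace fastrpc library):
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDWR);
 *
 *	ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);	// attach to the audio PD
 *
 *	struct fastrpc_invoke_args args[1] = {
 *		{ .ptr = (__u64)(uintptr_t)buf, .length = sizeof(buf), .fd = -1 },
 *	};
 *	struct fastrpc_invoke inv = {
 *		.handle = handle,
 *		.sc = 0x00010000,	// FASTRPC_SCALARS(0, 1, 0)
 *		.args = (__u64)(uintptr_t)args,
 *	};
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 *
 * close(fd) then tears the session down via fastrpc_device_release().
 */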

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
		dev_err(&pdev->dev, "too many sessions\n");
		spin_unlock_irqrestore(&cctx->lock, flags);
		return -ENOSPC;
	}
	sess = &cctx->session[cctx->sesscount++];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount++];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				   bool is_secured, const char *domain)
{
	struct fastrpc_device *fdev;
	int err;

	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	fdev->secure = is_secured;
	fdev->cctx = cctx;
	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
	fdev->miscdev.fops = &fastrpc_fops;
	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
					    domain, is_secured ? "-secure" : "");
	err = misc_register(&fdev->miscdev);
	if (!err) {
		if (is_secured)
			cctx->secure_fdevice = fdev;
		else
			cctx->fdevice = fdev;
	}

	return err;
}
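
/*
 * Resulting device nodes (illustrative): a "cdsp" domain registers both
 * /dev/fastrpc-cdsp-secure and /dev/fastrpc-cdsp, while adsp/mdsp/sdsp get
 * a single node, named fastrpc-<domain>-secure when the DT lacks
 * "qcom,non-secure-domain" and fastrpc-<domain> otherwise.
 */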

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1, vmcount;
	const char *domain;
	bool secure_dsp;
	unsigned int vmids[FASTRPC_MAX_VMIDS];

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	vmcount = of_property_read_variable_u32_array(rdev->of_node,
						      "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
	if (vmcount < 0)
		vmcount = 0;
	else if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (vmcount) {
		data->vmcount = vmcount;
		data->perms = BIT(QCOM_SCM_VMID_HLOS);
		for (i = 0; i < data->vmcount; i++) {
			data->vmperms[i].vmid = vmids[i];
			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
		}
	}

	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
	data->secure = secure_dsp;

	switch (domain_id) {
	case ADSP_DOMAIN_ID:
	case MDSP_DOMAIN_ID:
	case SDSP_DOMAIN_ID:
		/* Unsigned PD offloading is only supported on CDSP */
		data->unsigned_support = false;
		err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	case CDSP_DOMAIN_ID:
		data->unsigned_support = true;
		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
		err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
		if (err)
			goto fdev_error;

		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
		if (err)
			goto fdev_error;
		break;
	default:
		err = -EINVAL;
		goto fdev_error;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
fdev_error:
	kfree(data);
	return err;
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (cctx->fdevice)
		misc_deregister(&cctx->fdevice->miscdev);

	if (cctx->secure_fdevice)
		misc_deregister(&cctx->secure_fdevice->miscdev);

	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);