Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2015-2021, Linaro Limited
0004  * Copyright (c) 2016, EPAM Systems
0005  */
0006 
0007 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0008 
0009 #include <linux/arm-smccc.h>
0010 #include <linux/errno.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/io.h>
0013 #include <linux/irqdomain.h>
0014 #include <linux/mm.h>
0015 #include <linux/module.h>
0016 #include <linux/of.h>
0017 #include <linux/of_irq.h>
0018 #include <linux/of_platform.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/sched.h>
0021 #include <linux/slab.h>
0022 #include <linux/string.h>
0023 #include <linux/tee_drv.h>
0024 #include <linux/types.h>
0025 #include <linux/workqueue.h>
0026 #include "optee_private.h"
0027 #include "optee_smc.h"
0028 #include "optee_rpc_cmd.h"
0029 #include <linux/kmemleak.h>
0030 #define CREATE_TRACE_POINTS
0031 #include "optee_trace.h"
0032 
0033 /*
0034  * This file implement the SMC ABI used when communicating with secure world
0035  * OP-TEE OS via raw SMCs.
0036  * This file is divided into the following sections:
0037  * 1. Convert between struct tee_param and struct optee_msg_param
0038  * 2. Low level support functions to register shared memory in secure world
0039  * 3. Dynamic shared memory pool based on alloc_pages()
0040  * 4. Do a normal scheduled call into secure world
0041  * 5. Asynchronous notification
0042  * 6. Driver initialization.
0043  */
0044 
0045 /*
0046  * A typical OP-TEE private shm allocation is 224 bytes (argument struct
0047  * with 6 parameters, needed for open session). So with an alignment of 512
0048  * we'll waste a bit more than 50%. However, it's only expected that we'll
0049  * have a handful of these structs allocated at a time. Most memory will
0050  * be allocated aligned to the page size, So all in all this should scale
0051  * up and down quite well.
0052  */
0053 #define OPTEE_MIN_STATIC_POOL_ALIGN    9 /* 512 bytes aligned */
0054 
0055 /*
0056  * 1. Convert between struct tee_param and struct optee_msg_param
0057  *
0058  * optee_from_msg_param() and optee_to_msg_param() are the main
0059  * functions.
0060  */
0061 
0062 static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
0063                   const struct optee_msg_param *mp)
0064 {
0065     struct tee_shm *shm;
0066     phys_addr_t pa;
0067     int rc;
0068 
0069     p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
0070           attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
0071     p->u.memref.size = mp->u.tmem.size;
0072     shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
0073     if (!shm) {
0074         p->u.memref.shm_offs = 0;
0075         p->u.memref.shm = NULL;
0076         return 0;
0077     }
0078 
0079     rc = tee_shm_get_pa(shm, 0, &pa);
0080     if (rc)
0081         return rc;
0082 
0083     p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
0084     p->u.memref.shm = shm;
0085 
0086     return 0;
0087 }
0088 
0089 static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
0090                    const struct optee_msg_param *mp)
0091 {
0092     struct tee_shm *shm;
0093 
0094     p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
0095           attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
0096     p->u.memref.size = mp->u.rmem.size;
0097     shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
0098 
0099     if (shm) {
0100         p->u.memref.shm_offs = mp->u.rmem.offs;
0101         p->u.memref.shm = shm;
0102     } else {
0103         p->u.memref.shm_offs = 0;
0104         p->u.memref.shm = NULL;
0105     }
0106 }
0107 
0108 /**
0109  * optee_from_msg_param() - convert from OPTEE_MSG parameters to
0110  *              struct tee_param
0111  * @optee:  main service struct
0112  * @params: subsystem internal parameter representation
0113  * @num_params: number of elements in the parameter arrays
0114  * @msg_params: OPTEE_MSG parameters
0115  * Returns 0 on success or <0 on failure
0116  */
0117 static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
0118                 size_t num_params,
0119                 const struct optee_msg_param *msg_params)
0120 {
0121     int rc;
0122     size_t n;
0123 
0124     for (n = 0; n < num_params; n++) {
0125         struct tee_param *p = params + n;
0126         const struct optee_msg_param *mp = msg_params + n;
0127         u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
0128 
0129         switch (attr) {
0130         case OPTEE_MSG_ATTR_TYPE_NONE:
0131             p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
0132             memset(&p->u, 0, sizeof(p->u));
0133             break;
0134         case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
0135         case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
0136         case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
0137             optee_from_msg_param_value(p, attr, mp);
0138             break;
0139         case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
0140         case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
0141         case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
0142             rc = from_msg_param_tmp_mem(p, attr, mp);
0143             if (rc)
0144                 return rc;
0145             break;
0146         case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
0147         case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
0148         case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
0149             from_msg_param_reg_mem(p, attr, mp);
0150             break;
0151 
0152         default:
0153             return -EINVAL;
0154         }
0155     }
0156     return 0;
0157 }
0158 
0159 static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
0160                 const struct tee_param *p)
0161 {
0162     int rc;
0163     phys_addr_t pa;
0164 
0165     mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
0166            TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
0167 
0168     mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
0169     mp->u.tmem.size = p->u.memref.size;
0170 
0171     if (!p->u.memref.shm) {
0172         mp->u.tmem.buf_ptr = 0;
0173         return 0;
0174     }
0175 
0176     rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
0177     if (rc)
0178         return rc;
0179 
0180     mp->u.tmem.buf_ptr = pa;
0181     mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
0182             OPTEE_MSG_ATTR_CACHE_SHIFT;
0183 
0184     return 0;
0185 }
0186 
0187 static int to_msg_param_reg_mem(struct optee_msg_param *mp,
0188                 const struct tee_param *p)
0189 {
0190     mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
0191            TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
0192 
0193     mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
0194     mp->u.rmem.size = p->u.memref.size;
0195     mp->u.rmem.offs = p->u.memref.shm_offs;
0196     return 0;
0197 }
0198 
0199 /**
0200  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
0201  * @optee:  main service struct
0202  * @msg_params: OPTEE_MSG parameters
0203  * @num_params: number of elements in the parameter arrays
0204  * @params: subsystem itnernal parameter representation
0205  * Returns 0 on success or <0 on failure
0206  */
0207 static int optee_to_msg_param(struct optee *optee,
0208                   struct optee_msg_param *msg_params,
0209                   size_t num_params, const struct tee_param *params)
0210 {
0211     int rc;
0212     size_t n;
0213 
0214     for (n = 0; n < num_params; n++) {
0215         const struct tee_param *p = params + n;
0216         struct optee_msg_param *mp = msg_params + n;
0217 
0218         switch (p->attr) {
0219         case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
0220             mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
0221             memset(&mp->u, 0, sizeof(mp->u));
0222             break;
0223         case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
0224         case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
0225         case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
0226             optee_to_msg_param_value(mp, p);
0227             break;
0228         case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
0229         case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
0230         case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
0231             if (tee_shm_is_dynamic(p->u.memref.shm))
0232                 rc = to_msg_param_reg_mem(mp, p);
0233             else
0234                 rc = to_msg_param_tmp_mem(mp, p);
0235             if (rc)
0236                 return rc;
0237             break;
0238         default:
0239             return -EINVAL;
0240         }
0241     }
0242     return 0;
0243 }
0244 
0245 /*
0246  * 2. Low level support functions to register shared memory in secure world
0247  *
0248  * Functions to enable/disable shared memory caching in secure world, that
0249  * is, lazy freeing of previously allocated shared memory. Freeing is
0250  * performed when a request has been completed.
0251  *
0252  * Functions to register and unregister shared memory both for normal
0253  * clients and for tee-supplicant.
0254  */
0255 
0256 /**
0257  * optee_enable_shm_cache() - Enables caching of some shared memory allocation
0258  *                in OP-TEE
0259  * @optee:  main service struct
0260  */
0261 static void optee_enable_shm_cache(struct optee *optee)
0262 {
0263     struct optee_call_waiter w;
0264 
0265     /* We need to retry until secure world isn't busy. */
0266     optee_cq_wait_init(&optee->call_queue, &w);
0267     while (true) {
0268         struct arm_smccc_res res;
0269 
0270         optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
0271                      0, 0, 0, 0, 0, 0, 0, &res);
0272         if (res.a0 == OPTEE_SMC_RETURN_OK)
0273             break;
0274         optee_cq_wait_for_completion(&optee->call_queue, &w);
0275     }
0276     optee_cq_wait_final(&optee->call_queue, &w);
0277 }
0278 
0279 /**
0280  * __optee_disable_shm_cache() - Disables caching of some shared memory
0281  *               allocation in OP-TEE
0282  * @optee:  main service struct
0283  * @is_mapped:  true if the cached shared memory addresses were mapped by this
0284  *      kernel, are safe to dereference, and should be freed
0285  */
0286 static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
0287 {
0288     struct optee_call_waiter w;
0289 
0290     /* We need to retry until secure world isn't busy. */
0291     optee_cq_wait_init(&optee->call_queue, &w);
0292     while (true) {
0293         union {
0294             struct arm_smccc_res smccc;
0295             struct optee_smc_disable_shm_cache_result result;
0296         } res;
0297 
0298         optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
0299                      0, 0, 0, 0, 0, 0, 0, &res.smccc);
0300         if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
0301             break; /* All shm's freed */
0302         if (res.result.status == OPTEE_SMC_RETURN_OK) {
0303             struct tee_shm *shm;
0304 
0305             /*
0306              * Shared memory references that were not mapped by
0307              * this kernel must be ignored to prevent a crash.
0308              */
0309             if (!is_mapped)
0310                 continue;
0311 
0312             shm = reg_pair_to_ptr(res.result.shm_upper32,
0313                           res.result.shm_lower32);
0314             tee_shm_free(shm);
0315         } else {
0316             optee_cq_wait_for_completion(&optee->call_queue, &w);
0317         }
0318     }
0319     optee_cq_wait_final(&optee->call_queue, &w);
0320 }
0321 
0322 /**
0323  * optee_disable_shm_cache() - Disables caching of mapped shared memory
0324  *                 allocations in OP-TEE
0325  * @optee:  main service struct
0326  */
0327 static void optee_disable_shm_cache(struct optee *optee)
0328 {
0329     return __optee_disable_shm_cache(optee, true);
0330 }
0331 
0332 /**
0333  * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
0334  *                  allocations in OP-TEE which are not
0335  *                  currently mapped
0336  * @optee:  main service struct
0337  */
0338 static void optee_disable_unmapped_shm_cache(struct optee *optee)
0339 {
0340     return __optee_disable_shm_cache(optee, false);
0341 }
0342 
0343 #define PAGELIST_ENTRIES_PER_PAGE               \
0344     ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
0345 
0346 /*
0347  * The final entry in each pagelist page is a pointer to the next
0348  * pagelist page.
0349  */
0350 static size_t get_pages_list_size(size_t num_entries)
0351 {
0352     int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
0353 
0354     return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
0355 }
0356 
0357 static u64 *optee_allocate_pages_list(size_t num_entries)
0358 {
0359     return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
0360 }
0361 
0362 static void optee_free_pages_list(void *list, size_t num_entries)
0363 {
0364     free_pages_exact(list, get_pages_list_size(num_entries));
0365 }
0366 
0367 /**
0368  * optee_fill_pages_list() - write list of user pages to given shared
0369  * buffer.
0370  *
0371  * @dst: page-aligned buffer where list of pages will be stored
0372  * @pages: array of pages that represents shared buffer
0373  * @num_pages: number of entries in @pages
0374  * @page_offset: offset of user buffer from page start
0375  *
0376  * @dst should be big enough to hold list of user page addresses and
0377  *  links to the next pages of buffer
0378  */
0379 static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
0380                   size_t page_offset)
0381 {
0382     int n = 0;
0383     phys_addr_t optee_page;
0384     /*
0385      * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
0386      * for details.
0387      */
0388     struct {
0389         u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
0390         u64 next_page_data;
0391     } *pages_data;
0392 
0393     /*
0394      * Currently OP-TEE uses 4k page size and it does not looks
0395      * like this will change in the future.  On other hand, there are
0396      * no know ARM architectures with page size < 4k.
0397      * Thus the next built assert looks redundant. But the following
0398      * code heavily relies on this assumption, so it is better be
0399      * safe than sorry.
0400      */
0401     BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
0402 
0403     pages_data = (void *)dst;
0404     /*
0405      * If linux page is bigger than 4k, and user buffer offset is
0406      * larger than 4k/8k/12k/etc this will skip first 4k pages,
0407      * because they bear no value data for OP-TEE.
0408      */
0409     optee_page = page_to_phys(*pages) +
0410         round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
0411 
0412     while (true) {
0413         pages_data->pages_list[n++] = optee_page;
0414 
0415         if (n == PAGELIST_ENTRIES_PER_PAGE) {
0416             pages_data->next_page_data =
0417                 virt_to_phys(pages_data + 1);
0418             pages_data++;
0419             n = 0;
0420         }
0421 
0422         optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
0423         if (!(optee_page & ~PAGE_MASK)) {
0424             if (!--num_pages)
0425                 break;
0426             pages++;
0427             optee_page = page_to_phys(*pages);
0428         }
0429     }
0430 }
0431 
0432 static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
0433                   struct page **pages, size_t num_pages,
0434                   unsigned long start)
0435 {
0436     struct optee *optee = tee_get_drvdata(ctx->teedev);
0437     struct optee_msg_arg *msg_arg;
0438     struct tee_shm *shm_arg;
0439     u64 *pages_list;
0440     size_t sz;
0441     int rc;
0442 
0443     if (!num_pages)
0444         return -EINVAL;
0445 
0446     rc = optee_check_mem_type(start, num_pages);
0447     if (rc)
0448         return rc;
0449 
0450     pages_list = optee_allocate_pages_list(num_pages);
0451     if (!pages_list)
0452         return -ENOMEM;
0453 
0454     /*
0455      * We're about to register shared memory we can't register shared
0456      * memory for this request or there's a catch-22.
0457      *
0458      * So in this we'll have to do the good old temporary private
0459      * allocation instead of using optee_get_msg_arg().
0460      */
0461     sz = optee_msg_arg_size(optee->rpc_param_count);
0462     shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
0463     if (IS_ERR(shm_arg)) {
0464         rc = PTR_ERR(shm_arg);
0465         goto out;
0466     }
0467     msg_arg = tee_shm_get_va(shm_arg, 0);
0468     if (IS_ERR(msg_arg)) {
0469         rc = PTR_ERR(msg_arg);
0470         goto out;
0471     }
0472 
0473     optee_fill_pages_list(pages_list, pages, num_pages,
0474                   tee_shm_get_page_offset(shm));
0475 
0476     memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
0477     msg_arg->num_params = 1;
0478     msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
0479     msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
0480                 OPTEE_MSG_ATTR_NONCONTIG;
0481     msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
0482     msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
0483     /*
0484      * In the least bits of msg_arg->params->u.tmem.buf_ptr we
0485      * store buffer offset from 4k page, as described in OP-TEE ABI.
0486      */
0487     msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
0488       (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
0489 
0490     if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
0491         msg_arg->ret != TEEC_SUCCESS)
0492         rc = -EINVAL;
0493 
0494     tee_shm_free(shm_arg);
0495 out:
0496     optee_free_pages_list(pages_list, num_pages);
0497     return rc;
0498 }
0499 
0500 static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
0501 {
0502     struct optee *optee = tee_get_drvdata(ctx->teedev);
0503     struct optee_msg_arg *msg_arg;
0504     struct tee_shm *shm_arg;
0505     int rc = 0;
0506     size_t sz;
0507 
0508     /*
0509      * We're about to unregister shared memory and we may not be able
0510      * register shared memory for this request in case we're called
0511      * from optee_shm_arg_cache_uninit().
0512      *
0513      * So in order to keep things simple in this function just as in
0514      * optee_shm_register() we'll use temporary private allocation
0515      * instead of using optee_get_msg_arg().
0516      */
0517     sz = optee_msg_arg_size(optee->rpc_param_count);
0518     shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
0519     if (IS_ERR(shm_arg))
0520         return PTR_ERR(shm_arg);
0521     msg_arg = tee_shm_get_va(shm_arg, 0);
0522     if (IS_ERR(msg_arg)) {
0523         rc = PTR_ERR(msg_arg);
0524         goto out;
0525     }
0526 
0527     memset(msg_arg, 0, sz);
0528     msg_arg->num_params = 1;
0529     msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
0530     msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
0531     msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
0532 
0533     if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
0534         msg_arg->ret != TEEC_SUCCESS)
0535         rc = -EINVAL;
0536 out:
0537     tee_shm_free(shm_arg);
0538     return rc;
0539 }
0540 
0541 static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
0542                    struct page **pages, size_t num_pages,
0543                    unsigned long start)
0544 {
0545     /*
0546      * We don't want to register supplicant memory in OP-TEE.
0547      * Instead information about it will be passed in RPC code.
0548      */
0549     return optee_check_mem_type(start, num_pages);
0550 }
0551 
/*
 * Supplicant memory was never registered (see optee_shm_register_supp()),
 * so there is nothing to undo here.
 */
static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}
0557 
0558 /*
0559  * 3. Dynamic shared memory pool based on alloc_pages()
0560  *
0561  * Implements an OP-TEE specific shared memory pool which is used
0562  * when dynamic shared memory is supported by secure world.
0563  *
0564  * The main function is optee_shm_pool_alloc_pages().
0565  */
0566 
0567 static int pool_op_alloc(struct tee_shm_pool *pool,
0568              struct tee_shm *shm, size_t size, size_t align)
0569 {
0570     /*
0571      * Shared memory private to the OP-TEE driver doesn't need
0572      * to be registered with OP-TEE.
0573      */
0574     if (shm->flags & TEE_SHM_PRIV)
0575         return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
0576 
0577     return optee_pool_op_alloc_helper(pool, shm, size, align,
0578                       optee_shm_register);
0579 }
0580 
0581 static void pool_op_free(struct tee_shm_pool *pool,
0582              struct tee_shm *shm)
0583 {
0584     if (!(shm->flags & TEE_SHM_PRIV))
0585         optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
0586     else
0587         optee_pool_op_free_helper(pool, shm, NULL);
0588 }
0589 
/* Release the pool object allocated by optee_shm_pool_alloc_pages() */
static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}
0594 
0595 static const struct tee_shm_pool_ops pool_ops = {
0596     .alloc = pool_op_alloc,
0597     .free = pool_op_free,
0598     .destroy_pool = pool_op_destroy_pool,
0599 };
0600 
0601 /**
0602  * optee_shm_pool_alloc_pages() - create page-based allocator pool
0603  *
0604  * This pool is used when OP-TEE supports dymanic SHM. In this case
0605  * command buffers and such are allocated from kernel's own memory.
0606  */
0607 static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
0608 {
0609     struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
0610 
0611     if (!pool)
0612         return ERR_PTR(-ENOMEM);
0613 
0614     pool->ops = &pool_ops;
0615 
0616     return pool;
0617 }
0618 
0619 /*
0620  * 4. Do a normal scheduled call into secure world
0621  *
0622  * The function optee_smc_do_call_with_arg() performs a normal scheduled
0623  * call into secure world. During this call may normal world request help
0624  * from normal world using RPCs, Remote Procedure Calls. This includes
0625  * delivery of non-secure interrupts to for instance allow rescheduling of
0626  * the current task.
0627  */
0628 
0629 static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
0630                      struct optee_msg_arg *arg)
0631 {
0632     struct tee_shm *shm;
0633 
0634     arg->ret_origin = TEEC_ORIGIN_COMMS;
0635 
0636     if (arg->num_params != 1 ||
0637         arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
0638         arg->ret = TEEC_ERROR_BAD_PARAMETERS;
0639         return;
0640     }
0641 
0642     shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
0643     switch (arg->params[0].u.value.a) {
0644     case OPTEE_RPC_SHM_TYPE_APPL:
0645         optee_rpc_cmd_free_suppl(ctx, shm);
0646         break;
0647     case OPTEE_RPC_SHM_TYPE_KERNEL:
0648         tee_shm_free(shm);
0649         break;
0650     default:
0651         arg->ret = TEEC_ERROR_BAD_PARAMETERS;
0652     }
0653     arg->ret = TEEC_SUCCESS;
0654 }
0655 
0656 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
0657                       struct optee *optee,
0658                       struct optee_msg_arg *arg,
0659                       struct optee_call_ctx *call_ctx)
0660 {
0661     phys_addr_t pa;
0662     struct tee_shm *shm;
0663     size_t sz;
0664     size_t n;
0665 
0666     arg->ret_origin = TEEC_ORIGIN_COMMS;
0667 
0668     if (!arg->num_params ||
0669         arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
0670         arg->ret = TEEC_ERROR_BAD_PARAMETERS;
0671         return;
0672     }
0673 
0674     for (n = 1; n < arg->num_params; n++) {
0675         if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
0676             arg->ret = TEEC_ERROR_BAD_PARAMETERS;
0677             return;
0678         }
0679     }
0680 
0681     sz = arg->params[0].u.value.b;
0682     switch (arg->params[0].u.value.a) {
0683     case OPTEE_RPC_SHM_TYPE_APPL:
0684         shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
0685         break;
0686     case OPTEE_RPC_SHM_TYPE_KERNEL:
0687         shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
0688         break;
0689     default:
0690         arg->ret = TEEC_ERROR_BAD_PARAMETERS;
0691         return;
0692     }
0693 
0694     if (IS_ERR(shm)) {
0695         arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
0696         return;
0697     }
0698 
0699     if (tee_shm_get_pa(shm, 0, &pa)) {
0700         arg->ret = TEEC_ERROR_BAD_PARAMETERS;
0701         goto bad;
0702     }
0703 
0704     sz = tee_shm_get_size(shm);
0705 
0706     if (tee_shm_is_dynamic(shm)) {
0707         struct page **pages;
0708         u64 *pages_list;
0709         size_t page_num;
0710 
0711         pages = tee_shm_get_pages(shm, &page_num);
0712         if (!pages || !page_num) {
0713             arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
0714             goto bad;
0715         }
0716 
0717         pages_list = optee_allocate_pages_list(page_num);
0718         if (!pages_list) {
0719             arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
0720             goto bad;
0721         }
0722 
0723         call_ctx->pages_list = pages_list;
0724         call_ctx->num_entries = page_num;
0725 
0726         arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
0727                       OPTEE_MSG_ATTR_NONCONTIG;
0728         /*
0729          * In the least bits of u.tmem.buf_ptr we store buffer offset
0730          * from 4k page, as described in OP-TEE ABI.
0731          */
0732         arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
0733             (tee_shm_get_page_offset(shm) &
0734              (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
0735         arg->params[0].u.tmem.size = tee_shm_get_size(shm);
0736         arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
0737 
0738         optee_fill_pages_list(pages_list, pages, page_num,
0739                       tee_shm_get_page_offset(shm));
0740     } else {
0741         arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
0742         arg->params[0].u.tmem.buf_ptr = pa;
0743         arg->params[0].u.tmem.size = sz;
0744         arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
0745     }
0746 
0747     arg->ret = TEEC_SUCCESS;
0748     return;
0749 bad:
0750     tee_shm_free(shm);
0751 }
0752 
0753 static void free_pages_list(struct optee_call_ctx *call_ctx)
0754 {
0755     if (call_ctx->pages_list) {
0756         optee_free_pages_list(call_ctx->pages_list,
0757                       call_ctx->num_entries);
0758         call_ctx->pages_list = NULL;
0759         call_ctx->num_entries = 0;
0760     }
0761 }
0762 
/* Release per-call RPC resources once the OP-TEE invocation is done */
static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}
0767 
0768 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
0769                 struct optee_msg_arg *arg,
0770                 struct optee_call_ctx *call_ctx)
0771 {
0772 
0773     switch (arg->cmd) {
0774     case OPTEE_RPC_CMD_SHM_ALLOC:
0775         free_pages_list(call_ctx);
0776         handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
0777         break;
0778     case OPTEE_RPC_CMD_SHM_FREE:
0779         handle_rpc_func_cmd_shm_free(ctx, arg);
0780         break;
0781     default:
0782         optee_rpc_cmd(ctx, optee, arg);
0783     }
0784 }
0785 
0786 /**
0787  * optee_handle_rpc() - handle RPC from secure world
0788  * @ctx:    context doing the RPC
0789  * @param:  value of registers for the RPC
0790  * @call_ctx:   call context. Preserved during one OP-TEE invocation
0791  *
0792  * Result of RPC is written back into @param.
0793  */
0794 static void optee_handle_rpc(struct tee_context *ctx,
0795                  struct optee_msg_arg *rpc_arg,
0796                  struct optee_rpc_param *param,
0797                  struct optee_call_ctx *call_ctx)
0798 {
0799     struct tee_device *teedev = ctx->teedev;
0800     struct optee *optee = tee_get_drvdata(teedev);
0801     struct optee_msg_arg *arg;
0802     struct tee_shm *shm;
0803     phys_addr_t pa;
0804 
0805     switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
0806     case OPTEE_SMC_RPC_FUNC_ALLOC:
0807         shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
0808         if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
0809             reg_pair_from_64(&param->a1, &param->a2, pa);
0810             reg_pair_from_64(&param->a4, &param->a5,
0811                      (unsigned long)shm);
0812         } else {
0813             param->a1 = 0;
0814             param->a2 = 0;
0815             param->a4 = 0;
0816             param->a5 = 0;
0817         }
0818         kmemleak_not_leak(shm);
0819         break;
0820     case OPTEE_SMC_RPC_FUNC_FREE:
0821         shm = reg_pair_to_ptr(param->a1, param->a2);
0822         tee_shm_free(shm);
0823         break;
0824     case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
0825         /*
0826          * A foreign interrupt was raised while secure world was
0827          * executing, since they are handled in Linux a dummy RPC is
0828          * performed to let Linux take the interrupt through the normal
0829          * vector.
0830          */
0831         break;
0832     case OPTEE_SMC_RPC_FUNC_CMD:
0833         if (rpc_arg) {
0834             arg = rpc_arg;
0835         } else {
0836             shm = reg_pair_to_ptr(param->a1, param->a2);
0837             arg = tee_shm_get_va(shm, 0);
0838             if (IS_ERR(arg)) {
0839                 pr_err("%s: tee_shm_get_va %p failed\n",
0840                        __func__, shm);
0841                 break;
0842             }
0843         }
0844 
0845         handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
0846         break;
0847     default:
0848         pr_warn("Unknown RPC func 0x%x\n",
0849             (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
0850         break;
0851     }
0852 
0853     param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
0854 }
0855 
0856 /**
0857  * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
0858  * @ctx:    calling context
0859  * @shm:    shared memory holding the message to pass to secure world
0860  * @offs:   offset of the message in @shm
0861  *
0862  * Does and SMC to OP-TEE in secure world and handles eventual resulting
0863  * Remote Procedure Calls (RPC) from OP-TEE.
0864  *
0865  * Returns return code from secure world, 0 is OK
0866  */
0867 static int optee_smc_do_call_with_arg(struct tee_context *ctx,
0868                       struct tee_shm *shm, u_int offs)
0869 {
0870     struct optee *optee = tee_get_drvdata(ctx->teedev);
0871     struct optee_call_waiter w;
0872     struct optee_rpc_param param = { };
0873     struct optee_call_ctx call_ctx = { };
0874     struct optee_msg_arg *rpc_arg = NULL;
0875     int rc;
0876 
0877     if (optee->rpc_param_count) {
0878         struct optee_msg_arg *arg;
0879         unsigned int rpc_arg_offs;
0880 
0881         arg = tee_shm_get_va(shm, offs);
0882         if (IS_ERR(arg))
0883             return PTR_ERR(arg);
0884 
0885         rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
0886         rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
0887         if (IS_ERR(rpc_arg))
0888             return PTR_ERR(rpc_arg);
0889     }
0890 
0891     if  (rpc_arg && tee_shm_is_dynamic(shm)) {
0892         param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
0893         reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
0894         param.a3 = offs;
0895     } else {
0896         phys_addr_t parg;
0897 
0898         rc = tee_shm_get_pa(shm, offs, &parg);
0899         if (rc)
0900             return rc;
0901 
0902         if (rpc_arg)
0903             param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
0904         else
0905             param.a0 = OPTEE_SMC_CALL_WITH_ARG;
0906         reg_pair_from_64(&param.a1, &param.a2, parg);
0907     }
0908     /* Initialize waiter */
0909     optee_cq_wait_init(&optee->call_queue, &w);
0910     while (true) {
0911         struct arm_smccc_res res;
0912 
0913         trace_optee_invoke_fn_begin(&param);
0914         optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
0915                      param.a4, param.a5, param.a6, param.a7,
0916                      &res);
0917         trace_optee_invoke_fn_end(&param, &res);
0918 
0919         if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
0920             /*
0921              * Out of threads in secure world, wait for a thread
0922              * become available.
0923              */
0924             optee_cq_wait_for_completion(&optee->call_queue, &w);
0925         } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
0926             cond_resched();
0927             param.a0 = res.a0;
0928             param.a1 = res.a1;
0929             param.a2 = res.a2;
0930             param.a3 = res.a3;
0931             optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
0932         } else {
0933             rc = res.a0;
0934             break;
0935         }
0936     }
0937 
0938     optee_rpc_finalize_call(&call_ctx);
0939     /*
0940      * We're done with our thread in secure world, if there's any
0941      * thread waiters wake up one.
0942      */
0943     optee_cq_wait_final(&optee->call_queue, &w);
0944 
0945     return rc;
0946 }
0947 
0948 static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
0949 {
0950     struct optee_shm_arg_entry *entry;
0951     struct optee_msg_arg *msg_arg;
0952     struct tee_shm *shm;
0953     u_int offs;
0954 
0955     msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
0956     if (IS_ERR(msg_arg))
0957         return PTR_ERR(msg_arg);
0958 
0959     msg_arg->cmd = cmd;
0960     optee_smc_do_call_with_arg(ctx, shm, offs);
0961 
0962     optee_free_msg_arg(ctx, entry, offs);
0963     return 0;
0964 }
0965 
0966 static int optee_smc_do_bottom_half(struct tee_context *ctx)
0967 {
0968     return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
0969 }
0970 
0971 static int optee_smc_stop_async_notif(struct tee_context *ctx)
0972 {
0973     return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
0974 }
0975 
0976 /*
0977  * 5. Asynchronous notification
0978  */
0979 
0980 static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
0981                  bool *value_pending)
0982 {
0983     struct arm_smccc_res res;
0984 
0985     invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
0986 
0987     if (res.a0)
0988         return 0;
0989     *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
0990     *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
0991     return res.a1;
0992 }
0993 
0994 static irqreturn_t notif_irq_handler(int irq, void *dev_id)
0995 {
0996     struct optee *optee = dev_id;
0997     bool do_bottom_half = false;
0998     bool value_valid;
0999     bool value_pending;
1000     u32 value;
1001 
1002     do {
1003         value = get_async_notif_value(optee->smc.invoke_fn,
1004                           &value_valid, &value_pending);
1005         if (!value_valid)
1006             break;
1007 
1008         if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
1009             do_bottom_half = true;
1010         else
1011             optee_notif_send(optee, value);
1012     } while (value_pending);
1013 
1014     if (do_bottom_half)
1015         return IRQ_WAKE_THREAD;
1016     return IRQ_HANDLED;
1017 }
1018 
1019 static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
1020 {
1021     struct optee *optee = dev_id;
1022 
1023     optee_smc_do_bottom_half(optee->ctx);
1024 
1025     return IRQ_HANDLED;
1026 }
1027 
1028 static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
1029 {
1030     int rc;
1031 
1032     rc = request_threaded_irq(irq, notif_irq_handler,
1033                   notif_irq_thread_fn,
1034                   0, "optee_notification", optee);
1035     if (rc)
1036         return rc;
1037 
1038     optee->smc.notif_irq = irq;
1039 
1040     return 0;
1041 }
1042 
1043 static void optee_smc_notif_uninit_irq(struct optee *optee)
1044 {
1045     if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
1046         optee_smc_stop_async_notif(optee->ctx);
1047         if (optee->smc.notif_irq) {
1048             free_irq(optee->smc.notif_irq, optee);
1049             irq_dispose_mapping(optee->smc.notif_irq);
1050         }
1051     }
1052 }
1053 
1054 /*
1055  * 6. Driver initialization
1056  *
1057  * During driver initialization is secure world probed to find out which
1058  * features it supports so the driver can be initialized with a matching
1059  * configuration. This involves for instance support for dynamic shared
1060  * memory instead of a static memory carvout.
1061  */
1062 
1063 static void optee_get_version(struct tee_device *teedev,
1064                   struct tee_ioctl_version_data *vers)
1065 {
1066     struct tee_ioctl_version_data v = {
1067         .impl_id = TEE_IMPL_ID_OPTEE,
1068         .impl_caps = TEE_OPTEE_CAP_TZ,
1069         .gen_caps = TEE_GEN_CAP_GP,
1070     };
1071     struct optee *optee = tee_get_drvdata(teedev);
1072 
1073     if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1074         v.gen_caps |= TEE_GEN_CAP_REG_MEM;
1075     if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
1076         v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
1077     *vers = v;
1078 }
1079 
1080 static int optee_smc_open(struct tee_context *ctx)
1081 {
1082     struct optee *optee = tee_get_drvdata(ctx->teedev);
1083     u32 sec_caps = optee->smc.sec_caps;
1084 
1085     return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
1086 }
1087 
1088 static const struct tee_driver_ops optee_clnt_ops = {
1089     .get_version = optee_get_version,
1090     .open = optee_smc_open,
1091     .release = optee_release,
1092     .open_session = optee_open_session,
1093     .close_session = optee_close_session,
1094     .invoke_func = optee_invoke_func,
1095     .cancel_req = optee_cancel_req,
1096     .shm_register = optee_shm_register,
1097     .shm_unregister = optee_shm_unregister,
1098 };
1099 
1100 static const struct tee_desc optee_clnt_desc = {
1101     .name = DRIVER_NAME "-clnt",
1102     .ops = &optee_clnt_ops,
1103     .owner = THIS_MODULE,
1104 };
1105 
1106 static const struct tee_driver_ops optee_supp_ops = {
1107     .get_version = optee_get_version,
1108     .open = optee_smc_open,
1109     .release = optee_release_supp,
1110     .supp_recv = optee_supp_recv,
1111     .supp_send = optee_supp_send,
1112     .shm_register = optee_shm_register_supp,
1113     .shm_unregister = optee_shm_unregister_supp,
1114 };
1115 
1116 static const struct tee_desc optee_supp_desc = {
1117     .name = DRIVER_NAME "-supp",
1118     .ops = &optee_supp_ops,
1119     .owner = THIS_MODULE,
1120     .flags = TEE_DESC_PRIVILEGED,
1121 };
1122 
1123 static const struct optee_ops optee_ops = {
1124     .do_call_with_arg = optee_smc_do_call_with_arg,
1125     .to_msg_param = optee_to_msg_param,
1126     .from_msg_param = optee_from_msg_param,
1127 };
1128 
1129 static int enable_async_notif(optee_invoke_fn *invoke_fn)
1130 {
1131     struct arm_smccc_res res;
1132 
1133     invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
1134 
1135     if (res.a0)
1136         return -EINVAL;
1137     return 0;
1138 }
1139 
1140 static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
1141 {
1142     struct arm_smccc_res res;
1143 
1144     invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
1145 
1146     if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
1147         res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
1148         return true;
1149     return false;
1150 }
1151 
1152 static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
1153 {
1154     union {
1155         struct arm_smccc_res smccc;
1156         struct optee_smc_call_get_os_revision_result result;
1157     } res = {
1158         .result = {
1159             .build_id = 0
1160         }
1161     };
1162 
1163     invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
1164           &res.smccc);
1165 
1166     if (res.result.build_id)
1167         pr_info("revision %lu.%lu (%08lx)", res.result.major,
1168             res.result.minor, res.result.build_id);
1169     else
1170         pr_info("revision %lu.%lu", res.result.major, res.result.minor);
1171 }
1172 
1173 static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
1174 {
1175     union {
1176         struct arm_smccc_res smccc;
1177         struct optee_smc_calls_revision_result result;
1178     } res;
1179 
1180     invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
1181 
1182     if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
1183         (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
1184         return true;
1185     return false;
1186 }
1187 
1188 static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
1189                         u32 *sec_caps, u32 *max_notif_value,
1190                         unsigned int *rpc_param_count)
1191 {
1192     union {
1193         struct arm_smccc_res smccc;
1194         struct optee_smc_exchange_capabilities_result result;
1195     } res;
1196     u32 a1 = 0;
1197 
1198     /*
1199      * TODO This isn't enough to tell if it's UP system (from kernel
1200      * point of view) or not, is_smp() returns the information
1201      * needed, but can't be called directly from here.
1202      */
1203     if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
1204         a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
1205 
1206     invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
1207           &res.smccc);
1208 
1209     if (res.result.status != OPTEE_SMC_RETURN_OK)
1210         return false;
1211 
1212     *sec_caps = res.result.capabilities;
1213     if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
1214         *max_notif_value = res.result.max_notif_value;
1215     else
1216         *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
1217     if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
1218         *rpc_param_count = (u8)res.result.data;
1219     else
1220         *rpc_param_count = 0;
1221 
1222     return true;
1223 }
1224 
1225 static struct tee_shm_pool *
1226 optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
1227 {
1228     union {
1229         struct arm_smccc_res smccc;
1230         struct optee_smc_get_shm_config_result result;
1231     } res;
1232     unsigned long vaddr;
1233     phys_addr_t paddr;
1234     size_t size;
1235     phys_addr_t begin;
1236     phys_addr_t end;
1237     void *va;
1238     void *rc;
1239 
1240     invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
1241     if (res.result.status != OPTEE_SMC_RETURN_OK) {
1242         pr_err("static shm service not available\n");
1243         return ERR_PTR(-ENOENT);
1244     }
1245 
1246     if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
1247         pr_err("only normal cached shared memory supported\n");
1248         return ERR_PTR(-EINVAL);
1249     }
1250 
1251     begin = roundup(res.result.start, PAGE_SIZE);
1252     end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
1253     paddr = begin;
1254     size = end - begin;
1255 
1256     va = memremap(paddr, size, MEMREMAP_WB);
1257     if (!va) {
1258         pr_err("shared memory ioremap failed\n");
1259         return ERR_PTR(-EINVAL);
1260     }
1261     vaddr = (unsigned long)va;
1262 
1263     rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
1264                     OPTEE_MIN_STATIC_POOL_ALIGN);
1265     if (IS_ERR(rc))
1266         memunmap(va);
1267     else
1268         *memremaped_shm = va;
1269 
1270     return rc;
1271 }
1272 
1273 /* Simple wrapper functions to be able to use a function pointer */
1274 static void optee_smccc_smc(unsigned long a0, unsigned long a1,
1275                 unsigned long a2, unsigned long a3,
1276                 unsigned long a4, unsigned long a5,
1277                 unsigned long a6, unsigned long a7,
1278                 struct arm_smccc_res *res)
1279 {
1280     arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
1281 }
1282 
1283 static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
1284                 unsigned long a2, unsigned long a3,
1285                 unsigned long a4, unsigned long a5,
1286                 unsigned long a6, unsigned long a7,
1287                 struct arm_smccc_res *res)
1288 {
1289     arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
1290 }
1291 
1292 static optee_invoke_fn *get_invoke_func(struct device *dev)
1293 {
1294     const char *method;
1295 
1296     pr_info("probing for conduit method.\n");
1297 
1298     if (device_property_read_string(dev, "method", &method)) {
1299         pr_warn("missing \"method\" property\n");
1300         return ERR_PTR(-ENXIO);
1301     }
1302 
1303     if (!strcmp("hvc", method))
1304         return optee_smccc_hvc;
1305     else if (!strcmp("smc", method))
1306         return optee_smccc_smc;
1307 
1308     pr_warn("invalid \"method\" property: %s\n", method);
1309     return ERR_PTR(-EINVAL);
1310 }
1311 
1312 /* optee_remove - Device Removal Routine
1313  * @pdev: platform device information struct
1314  *
1315  * optee_remove is called by platform subsystem to alert the driver
1316  * that it should release the device
1317  */
1318 static int optee_smc_remove(struct platform_device *pdev)
1319 {
1320     struct optee *optee = platform_get_drvdata(pdev);
1321 
1322     /*
1323      * Ask OP-TEE to free all cached shared memory objects to decrease
1324      * reference counters and also avoid wild pointers in secure world
1325      * into the old shared memory range.
1326      */
1327     if (!optee->rpc_param_count)
1328         optee_disable_shm_cache(optee);
1329 
1330     optee_smc_notif_uninit_irq(optee);
1331 
1332     optee_remove_common(optee);
1333 
1334     if (optee->smc.memremaped_shm)
1335         memunmap(optee->smc.memremaped_shm);
1336 
1337     kfree(optee);
1338 
1339     return 0;
1340 }
1341 
1342 /* optee_shutdown - Device Removal Routine
1343  * @pdev: platform device information struct
1344  *
1345  * platform_shutdown is called by the platform subsystem to alert
1346  * the driver that a shutdown, reboot, or kexec is happening and
1347  * device must be disabled.
1348  */
1349 static void optee_shutdown(struct platform_device *pdev)
1350 {
1351     struct optee *optee = platform_get_drvdata(pdev);
1352 
1353     if (!optee->rpc_param_count)
1354         optee_disable_shm_cache(optee);
1355 }
1356 
1357 static int optee_probe(struct platform_device *pdev)
1358 {
1359     optee_invoke_fn *invoke_fn;
1360     struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
1361     struct optee *optee = NULL;
1362     void *memremaped_shm = NULL;
1363     unsigned int rpc_param_count;
1364     struct tee_device *teedev;
1365     struct tee_context *ctx;
1366     u32 max_notif_value;
1367     u32 arg_cache_flags;
1368     u32 sec_caps;
1369     int rc;
1370 
1371     invoke_fn = get_invoke_func(&pdev->dev);
1372     if (IS_ERR(invoke_fn))
1373         return PTR_ERR(invoke_fn);
1374 
1375     if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
1376         pr_warn("api uid mismatch\n");
1377         return -EINVAL;
1378     }
1379 
1380     optee_msg_get_os_revision(invoke_fn);
1381 
1382     if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
1383         pr_warn("api revision mismatch\n");
1384         return -EINVAL;
1385     }
1386 
1387     if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
1388                          &max_notif_value,
1389                          &rpc_param_count)) {
1390         pr_warn("capabilities mismatch\n");
1391         return -EINVAL;
1392     }
1393 
1394     /*
1395      * Try to use dynamic shared memory if possible
1396      */
1397     if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
1398         /*
1399          * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
1400          * optee_get_msg_arg() to pre-register (by having
1401          * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
1402          * an argument struct.
1403          *
1404          * With the page is pre-registered we can use a non-zero
1405          * offset for argument struct, this is indicated with
1406          * OPTEE_SHM_ARG_SHARED.
1407          *
1408          * This means that optee_smc_do_call_with_arg() will use
1409          * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
1410          */
1411         if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
1412             arg_cache_flags = OPTEE_SHM_ARG_SHARED;
1413         else
1414             arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;
1415 
1416         pool = optee_shm_pool_alloc_pages();
1417     }
1418 
1419     /*
1420      * If dynamic shared memory is not available or failed - try static one
1421      */
1422     if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
1423         /*
1424          * The static memory pool can use non-zero page offsets so
1425          * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
1426          *
1427          * optee_get_msg_arg() should not pre-register the
1428          * allocated page used to pass an argument struct, this is
1429          * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
1430          *
1431          * This means that optee_smc_do_call_with_arg() will use
1432          * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
1433          * OPTEE_SMC_CALL_WITH_RPC_ARG.
1434          */
1435         arg_cache_flags = OPTEE_SHM_ARG_SHARED |
1436                   OPTEE_SHM_ARG_ALLOC_PRIV;
1437         pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
1438     }
1439 
1440     if (IS_ERR(pool))
1441         return PTR_ERR(pool);
1442 
1443     optee = kzalloc(sizeof(*optee), GFP_KERNEL);
1444     if (!optee) {
1445         rc = -ENOMEM;
1446         goto err_free_pool;
1447     }
1448 
1449     optee->ops = &optee_ops;
1450     optee->smc.invoke_fn = invoke_fn;
1451     optee->smc.sec_caps = sec_caps;
1452     optee->rpc_param_count = rpc_param_count;
1453 
1454     teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
1455     if (IS_ERR(teedev)) {
1456         rc = PTR_ERR(teedev);
1457         goto err_free_optee;
1458     }
1459     optee->teedev = teedev;
1460 
1461     teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
1462     if (IS_ERR(teedev)) {
1463         rc = PTR_ERR(teedev);
1464         goto err_unreg_teedev;
1465     }
1466     optee->supp_teedev = teedev;
1467 
1468     rc = tee_device_register(optee->teedev);
1469     if (rc)
1470         goto err_unreg_supp_teedev;
1471 
1472     rc = tee_device_register(optee->supp_teedev);
1473     if (rc)
1474         goto err_unreg_supp_teedev;
1475 
1476     mutex_init(&optee->call_queue.mutex);
1477     INIT_LIST_HEAD(&optee->call_queue.waiters);
1478     optee_supp_init(&optee->supp);
1479     optee->smc.memremaped_shm = memremaped_shm;
1480     optee->pool = pool;
1481     optee_shm_arg_cache_init(optee, arg_cache_flags);
1482 
1483     platform_set_drvdata(pdev, optee);
1484     ctx = teedev_open(optee->teedev);
1485     if (IS_ERR(ctx)) {
1486         rc = PTR_ERR(ctx);
1487         goto err_supp_uninit;
1488     }
1489     optee->ctx = ctx;
1490     rc = optee_notif_init(optee, max_notif_value);
1491     if (rc)
1492         goto err_close_ctx;
1493 
1494     if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
1495         unsigned int irq;
1496 
1497         rc = platform_get_irq(pdev, 0);
1498         if (rc < 0) {
1499             pr_err("platform_get_irq: ret %d\n", rc);
1500             goto err_notif_uninit;
1501         }
1502         irq = rc;
1503 
1504         rc = optee_smc_notif_init_irq(optee, irq);
1505         if (rc) {
1506             irq_dispose_mapping(irq);
1507             goto err_notif_uninit;
1508         }
1509         enable_async_notif(optee->smc.invoke_fn);
1510         pr_info("Asynchronous notifications enabled\n");
1511     }
1512 
1513     /*
1514      * Ensure that there are no pre-existing shm objects before enabling
1515      * the shm cache so that there's no chance of receiving an invalid
1516      * address during shutdown. This could occur, for example, if we're
1517      * kexec booting from an older kernel that did not properly cleanup the
1518      * shm cache.
1519      */
1520     optee_disable_unmapped_shm_cache(optee);
1521 
1522     /*
1523      * Only enable the shm cache in case we're not able to pass the RPC
1524      * arg struct right after the normal arg struct.
1525      */
1526     if (!optee->rpc_param_count)
1527         optee_enable_shm_cache(optee);
1528 
1529     if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1530         pr_info("dynamic shared memory is enabled\n");
1531 
1532     rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
1533     if (rc)
1534         goto err_disable_shm_cache;
1535 
1536     pr_info("initialized driver\n");
1537     return 0;
1538 
1539 err_disable_shm_cache:
1540     if (!optee->rpc_param_count)
1541         optee_disable_shm_cache(optee);
1542     optee_smc_notif_uninit_irq(optee);
1543     optee_unregister_devices();
1544 err_notif_uninit:
1545     optee_notif_uninit(optee);
1546 err_close_ctx:
1547     teedev_close_context(ctx);
1548 err_supp_uninit:
1549     optee_shm_arg_cache_uninit(optee);
1550     optee_supp_uninit(&optee->supp);
1551     mutex_destroy(&optee->call_queue.mutex);
1552 err_unreg_supp_teedev:
1553     tee_device_unregister(optee->supp_teedev);
1554 err_unreg_teedev:
1555     tee_device_unregister(optee->teedev);
1556 err_free_optee:
1557     kfree(optee);
1558 err_free_pool:
1559     tee_shm_pool_free(pool);
1560     if (memremaped_shm)
1561         memunmap(memremaped_shm);
1562     return rc;
1563 }
1564 
1565 static const struct of_device_id optee_dt_match[] = {
1566     { .compatible = "linaro,optee-tz" },
1567     {},
1568 };
1569 MODULE_DEVICE_TABLE(of, optee_dt_match);
1570 
1571 static struct platform_driver optee_driver = {
1572     .probe  = optee_probe,
1573     .remove = optee_smc_remove,
1574     .shutdown = optee_shutdown,
1575     .driver = {
1576         .name = "optee",
1577         .of_match_table = optee_dt_match,
1578     },
1579 };
1580 
1581 int optee_smc_abi_register(void)
1582 {
1583     return platform_driver_register(&optee_driver);
1584 }
1585 
1586 void optee_smc_abi_unregister(void)
1587 {
1588     platform_driver_unregister(&optee_driver);
1589 }