// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_private.h"

#define MAX_ARG_PARAM_COUNT 6

/*
 * How much memory we allocate for each entry. This doesn't have to be a
 * single page, but it makes sense to keep it at least a multiple of the
 * page size.
 */
#define SHM_ENTRY_SIZE      PAGE_SIZE

/*
 * We need to have a compile time constant to be able to determine the
 * maximum needed size of the bit field.
 */
#define MIN_ARG_SIZE        OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
#define MAX_ARG_COUNT_PER_ENTRY (SHM_ENTRY_SIZE / MIN_ARG_SIZE)
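
/*
 * Note: the runtime slot size returned by optee_msg_arg_size() below is
 * never smaller than MIN_ARG_SIZE, so MAX_ARG_COUNT_PER_ENTRY is an upper
 * bound on the number of argument structs per entry and is what the
 * bitmap in struct optee_shm_arg_entry is dimensioned for.
 */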

/*
 * Shared memory for argument structs is cached here. The number of
 * argument structs that can fit is determined at runtime depending on the
 * needed RPC parameter count reported by secure world
 * (optee->rpc_param_count).
 */
struct optee_shm_arg_entry {
    struct list_head list_node;
    struct tee_shm *shm;
    DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
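
/*
 * Each set bit in @map marks one argument-struct slot in @shm as in use;
 * slot n starts at offset n * optee_msg_arg_size(optee->rpc_param_count)
 * in the buffer. See optee_get_msg_arg() and optee_free_msg_arg() below.
 */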

void optee_cq_wait_init(struct optee_call_queue *cq,
                        struct optee_call_waiter *w)
{
    /*
     * We're preparing to make a call to secure world. In case we can't
     * allocate a thread in secure world we'll end up waiting in
     * optee_cq_wait_for_completion().
     *
     * Normally if there's no contention in secure world the call will
     * complete and we can clean up directly with optee_cq_wait_final().
     */
    mutex_lock(&cq->mutex);

    /*
     * We add ourselves to the queue, but we don't wait. This
     * guarantees that we don't lose a completion if secure world
     * returns busy and another thread that just exited tries to
     * complete someone.
     */
    init_completion(&w->c);
    list_add_tail(&w->list_node, &cq->waiters);

    mutex_unlock(&cq->mutex);
}

void optee_cq_wait_for_completion(struct optee_call_queue *cq,
                                  struct optee_call_waiter *w)
{
    wait_for_completion(&w->c);

    mutex_lock(&cq->mutex);

    /* Move to end of list to get out of the way for other waiters */
    list_del(&w->list_node);
    reinit_completion(&w->c);
    list_add_tail(&w->list_node, &cq->waiters);

    mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
    struct optee_call_waiter *w;

    list_for_each_entry(w, &cq->waiters, list_node) {
        if (!completion_done(&w->c)) {
            complete(&w->c);
            break;
        }
    }
}

void optee_cq_wait_final(struct optee_call_queue *cq,
                         struct optee_call_waiter *w)
{
    /*
     * We're done with the call to secure world. The thread in secure
     * world that was used for this call is now available for some
     * other task to use.
     */
    mutex_lock(&cq->mutex);

    /* Get out of the list */
    list_del(&w->list_node);

    /* Wake up one waiting task, if any */
    optee_cq_complete_one(cq);

    /*
     * If we've been completed, another task that just finished its call
     * to secure world signaled us. Since yet another thread is now
     * available in secure world, wake up another waiting task, if any.
     */
    if (completion_done(&w->c))
        optee_cq_complete_one(cq);

    mutex_unlock(&cq->mutex);
}
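
/*
 * The three helpers above are typically combined by the ABI-specific call
 * code roughly as follows (a simplified sketch, not the exact driver code):
 *
 *      optee_cq_wait_init(cq, &w);
 *      while (true) {
 *              invoke secure world;
 *              if (secure world reported that no thread was available)
 *                      optee_cq_wait_for_completion(cq, &w);
 *              else
 *                      break;
 *      }
 *      optee_cq_wait_final(cq, &w);
 */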

/* Requires ctxdata->mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
                                          u32 session_id)
{
    struct optee_session *sess;

    list_for_each_entry(sess, &ctxdata->sess_list, list_node)
        if (sess->session_id == session_id)
            return sess;

    return NULL;
}

void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
{
    INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
    mutex_init(&optee->shm_arg_cache.mutex);
    optee->shm_arg_cache.flags = flags;
}

void optee_shm_arg_cache_uninit(struct optee *optee)
{
    struct list_head *head = &optee->shm_arg_cache.shm_args;
    struct optee_shm_arg_entry *entry;

    mutex_destroy(&optee->shm_arg_cache.mutex);
    while (!list_empty(head)) {
        entry = list_first_entry(head, struct optee_shm_arg_entry,
                                 list_node);
        list_del(&entry->list_node);
        if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
             MAX_ARG_COUNT_PER_ENTRY) {
            pr_err("Freeing non-free entry\n");
        }
        tee_shm_free(entry->shm);
        kfree(entry);
    }
}

size_t optee_msg_arg_size(size_t rpc_param_count)
{
    size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);

    if (rpc_param_count)
        sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);

    return sz;
}
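
/*
 * Note: the size returned above always reserves room for a full
 * MAX_ARG_PARAM_COUNT-parameter argument struct and, when secure world
 * has reported that it needs RPC parameters (rpc_param_count != 0),
 * additional room in the same slot for an RPC argument struct.
 */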

/**
 * optee_get_msg_arg() - Provide shared memory for argument struct
 * @ctx:        Caller TEE context
 * @num_params: Number of parameters to store
 * @entry_ret:  Entry pointer, needed when freeing the buffer
 * @shm_ret:    Shared memory buffer
 * @offs_ret:   Offset of argument struct in shared memory buffer
 *
 * @returns a pointer to the argument struct in memory, else an ERR_PTR
 */
struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
                                        size_t num_params,
                                        struct optee_shm_arg_entry **entry_ret,
                                        struct tee_shm **shm_ret,
                                        u_int *offs_ret)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    size_t sz = optee_msg_arg_size(optee->rpc_param_count);
    struct optee_shm_arg_entry *entry;
    struct optee_msg_arg *ma;
    size_t args_per_entry;
    u_long bit;
    u_int offs;
    void *res;

    if (num_params > MAX_ARG_PARAM_COUNT)
        return ERR_PTR(-EINVAL);

    if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
        args_per_entry = SHM_ENTRY_SIZE / sz;
    else
        args_per_entry = 1;

    mutex_lock(&optee->shm_arg_cache.mutex);
    list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
        bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
        if (bit < args_per_entry)
            goto have_entry;
    }

    /*
     * No free entry was found, let's allocate a new one.
     */
    entry = kzalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        res = ERR_PTR(-ENOMEM);
        goto out;
    }

    if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
        res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
    else
        res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);

    if (IS_ERR(res)) {
        kfree(entry);
        goto out;
    }
    entry->shm = res;
    list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
    bit = 0;

have_entry:
    offs = bit * sz;
    res = tee_shm_get_va(entry->shm, offs);
    if (IS_ERR(res))
        goto out;
    ma = res;
    set_bit(bit, entry->map);
    memset(ma, 0, sz);
    ma->num_params = num_params;
    *entry_ret = entry;
    *shm_ret = entry->shm;
    *offs_ret = offs;
out:
    mutex_unlock(&optee->shm_arg_cache.mutex);
    return res;
}

/**
 * optee_free_msg_arg() - Free previously obtained shared memory
 * @ctx:        Caller TEE context
 * @entry:      Pointer returned when the shared memory was obtained
 * @offs:       Offset of shared memory buffer to free
 *
 * This function frees the shared memory obtained with optee_get_msg_arg().
 */
void optee_free_msg_arg(struct tee_context *ctx,
                        struct optee_shm_arg_entry *entry, u_int offs)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    size_t sz = optee_msg_arg_size(optee->rpc_param_count);
    u_long bit;

    if (offs > SHM_ENTRY_SIZE || offs % sz) {
        pr_err("Invalid offs %u\n", offs);
        return;
    }
    bit = offs / sz;

    mutex_lock(&optee->shm_arg_cache.mutex);

    if (!test_bit(bit, entry->map))
        pr_err("Bit pos %lu is already free\n", bit);
    clear_bit(bit, entry->map);

    mutex_unlock(&optee->shm_arg_cache.mutex);
}
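
/*
 * The usual pattern for the two helpers above can be seen in
 * optee_close_session_helper() and optee_cancel_req() below: obtain an
 * argument struct with optee_get_msg_arg(), fill in msg_arg->cmd and the
 * other fields, hand the returned shm/offs pair to
 * optee->ops->do_call_with_arg(), and finally release the slot with
 * optee_free_msg_arg().
 */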

int optee_open_session(struct tee_context *ctx,
                       struct tee_ioctl_open_session_arg *arg,
                       struct tee_param *param)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    struct optee_context_data *ctxdata = ctx->data;
    struct optee_shm_arg_entry *entry;
    struct tee_shm *shm;
    struct optee_msg_arg *msg_arg;
    struct optee_session *sess = NULL;
    uuid_t client_uuid;
    u_int offs;
    int rc;

    /* +2 for the meta parameters added below */
    msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
                                &entry, &shm, &offs);
    if (IS_ERR(msg_arg))
        return PTR_ERR(msg_arg);

    msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
    msg_arg->cancel_id = arg->cancel_id;

    /*
     * Initialize and add the meta parameters needed when opening a
     * session.
     */
    msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                              OPTEE_MSG_ATTR_META;
    msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
                              OPTEE_MSG_ATTR_META;
    memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
    msg_arg->params[1].u.value.c = arg->clnt_login;

    rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
                                      arg->clnt_uuid);
    if (rc)
        goto out;
    export_uuid(msg_arg->params[1].u.octets, &client_uuid);

    rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
                                  arg->num_params, param);
    if (rc)
        goto out;

    sess = kzalloc(sizeof(*sess), GFP_KERNEL);
    if (!sess) {
        rc = -ENOMEM;
        goto out;
    }

    if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
        msg_arg->ret = TEEC_ERROR_COMMUNICATION;
        msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
    }

    if (msg_arg->ret == TEEC_SUCCESS) {
        /* A new session has been created, add it to the list. */
        sess->session_id = msg_arg->session;
        mutex_lock(&ctxdata->mutex);
        list_add(&sess->list_node, &ctxdata->sess_list);
        mutex_unlock(&ctxdata->mutex);
    } else {
        kfree(sess);
    }

    if (optee->ops->from_msg_param(optee, param, arg->num_params,
                                   msg_arg->params + 2)) {
        arg->ret = TEEC_ERROR_COMMUNICATION;
        arg->ret_origin = TEEC_ORIGIN_COMMS;
        /* Close session again to avoid leakage */
        optee_close_session(ctx, msg_arg->session);
    } else {
        arg->session = msg_arg->session;
        arg->ret = msg_arg->ret;
        arg->ret_origin = msg_arg->ret_origin;
    }
out:
    optee_free_msg_arg(ctx, entry, offs);

    return rc;
}

int optee_close_session_helper(struct tee_context *ctx, u32 session)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    struct optee_shm_arg_entry *entry;
    struct optee_msg_arg *msg_arg;
    struct tee_shm *shm;
    u_int offs;

    msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
    if (IS_ERR(msg_arg))
        return PTR_ERR(msg_arg);

    msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
    msg_arg->session = session;
    optee->ops->do_call_with_arg(ctx, shm, offs);

    optee_free_msg_arg(ctx, entry, offs);

    return 0;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
    struct optee_context_data *ctxdata = ctx->data;
    struct optee_session *sess;

    /* Check that the session is valid and remove it from the list */
    mutex_lock(&ctxdata->mutex);
    sess = find_session(ctxdata, session);
    if (sess)
        list_del(&sess->list_node);
    mutex_unlock(&ctxdata->mutex);
    if (!sess)
        return -EINVAL;
    kfree(sess);

    return optee_close_session_helper(ctx, session);
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
                      struct tee_param *param)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    struct optee_context_data *ctxdata = ctx->data;
    struct optee_shm_arg_entry *entry;
    struct optee_msg_arg *msg_arg;
    struct optee_session *sess;
    struct tee_shm *shm;
    u_int offs;
    int rc;

    /* Check that the session is valid */
    mutex_lock(&ctxdata->mutex);
    sess = find_session(ctxdata, arg->session);
    mutex_unlock(&ctxdata->mutex);
    if (!sess)
        return -EINVAL;

    msg_arg = optee_get_msg_arg(ctx, arg->num_params,
                                &entry, &shm, &offs);
    if (IS_ERR(msg_arg))
        return PTR_ERR(msg_arg);
    msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
    msg_arg->func = arg->func;
    msg_arg->session = arg->session;
    msg_arg->cancel_id = arg->cancel_id;

    rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
                                  param);
    if (rc)
        goto out;

    if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
        msg_arg->ret = TEEC_ERROR_COMMUNICATION;
        msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
    }

    if (optee->ops->from_msg_param(optee, param, arg->num_params,
                                   msg_arg->params)) {
        msg_arg->ret = TEEC_ERROR_COMMUNICATION;
        msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
    }

    arg->ret = msg_arg->ret;
    arg->ret_origin = msg_arg->ret_origin;
out:
    optee_free_msg_arg(ctx, entry, offs);
    return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
    struct optee *optee = tee_get_drvdata(ctx->teedev);
    struct optee_context_data *ctxdata = ctx->data;
    struct optee_shm_arg_entry *entry;
    struct optee_msg_arg *msg_arg;
    struct optee_session *sess;
    struct tee_shm *shm;
    u_int offs;

    /* Check that the session is valid */
    mutex_lock(&ctxdata->mutex);
    sess = find_session(ctxdata, session);
    mutex_unlock(&ctxdata->mutex);
    if (!sess)
        return -EINVAL;

    msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
    if (IS_ERR(msg_arg))
        return PTR_ERR(msg_arg);

    msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
    msg_arg->session = session;
    msg_arg->cancel_id = cancel_id;
    optee->ops->do_call_with_arg(ctx, shm, offs);

    optee_free_msg_arg(ctx, entry, offs);
    return 0;
}

static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
    return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
            ((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
    return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
    while (vma && is_normal_memory(vma->vm_page_prot)) {
        if (vma->vm_end >= end)
            return 0;
        vma = vma->vm_next;
    }

    return -EINVAL;
}

int optee_check_mem_type(unsigned long start, size_t num_pages)
{
    struct mm_struct *mm = current->mm;
    int rc;

    /*
     * Allow kernel addresses to be registered with OP-TEE, as kernel
     * pages are configured as normal memory only.
     */
    if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
        return 0;

    mmap_read_lock(mm);
    rc = __check_mem_type(find_vma(mm, start),
                          start + num_pages * PAGE_SIZE);
    mmap_read_unlock(mm);

    return rc;
}
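
/*
 * optee_check_mem_type() is used when registering shared memory buffers
 * with secure world: a user-space range is only accepted if every VMA
 * covering it maps normal (cacheable) memory, which is what
 * is_normal_memory() checks above. Kernel and vmalloc addresses are
 * accepted directly since kernel pages are always normal memory.
 */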