/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* By this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			pr_err("failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			pr_err("failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		pr_err("failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: if true, queue the pump work to process the request
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and pump it
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);

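/*
 * Example (illustrative sketch, not part of this file): a driver's
 * ->encrypt() callback can simply hand the request off to the engine and
 * return the enqueue status (normally -EINPROGRESS, or -EBUSY for a
 * backlogged request). The names my_dev and my_dev_from_req() are
 * hypothetical, used only for illustration.
 *
 *	static int my_ablkcipher_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_req(req);
 *
 *		return crypto_transfer_cipher_request_to_engine(dd->engine,
 *								req);
 *	}
 */
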
/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: if true, queue the pump work to process the request
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and pump it
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

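/*
 * Example (illustrative sketch, not part of this file): the hash-side
 * counterpart of the cipher example above; my_dev and my_dev_from_req()
 * are again hypothetical names.
 *
 *	static int my_ahash_digest(struct ahash_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_req(req);
 *
 *		return crypto_transfer_hash_request_to_engine(dd->engine, req);
 *	}
 */
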
/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);

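/*
 * Example (illustrative sketch, not part of this file): once the hardware
 * signals completion, a driver typically reports the result back to the
 * engine from its interrupt handler (or a tasklet); the engine then
 * completes the request and pumps the next one. my_dev and its cur_req
 * field are hypothetical.
 *
 *	static irqreturn_t my_done_irq(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *
 *		crypto_finalize_cipher_request(dd->engine, dd->cur_req, 0);
 *		return IRQ_HANDLED;
 *	}
 */
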
/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * a while so that the pending requests can be pumped out of the
	 * engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

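/*
 * Example (illustrative sketch, not part of this file): typical probe-time
 * setup in a hypothetical driver. The engine is allocated, the driver's
 * callbacks are installed on it, and the engine is started. my_probe,
 * my_dev, my_prepare_req and my_one_request are assumed names, not a real
 * API.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd;
 *
 *		dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
 *		if (!dd)
 *			return -ENOMEM;
 *		platform_set_drvdata(pdev, dd);
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		dd->engine->prepare_cipher_request = my_prepare_req;
 *		dd->engine->cipher_one_request = my_one_request;
 *
 *		return crypto_engine_start(dd->engine);
 *	}
 */
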
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

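/*
 * Example (illustrative sketch, not part of this file): the matching
 * remove path for the probe sketch above; the engine memory itself is
 * devm-managed, so only the engine needs to be torn down here.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 */
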
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");