// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

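/*
 * Algorithm implementations served by this driver; each entry is built
 * only when the matching CONFIG_CRYPTO_DEV_QCE_* option is enabled.
 */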
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	&aead_ops,
#endif
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

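/*
 * Register the algorithms of every compiled-in ops table, stopping at
 * the first failure.
 */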
static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

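/*
 * Dispatch an asynchronous request to the ops table that matches its
 * crypto algorithm type (skcipher, ahash or aead).
 */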
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

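/*
 * Enqueue a new request (if any) and, when the engine is idle, dequeue
 * the next one and hand it to the hardware. Only one request is in
 * flight at a time; a backlogged request is notified before it runs.
 */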
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

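/*
 * Done tasklet: complete the finished request and kick the queue to
 * start the next one.
 */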
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

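/*
 * Hooks stored in struct qce_device for the algorithm implementations:
 * enqueue a request for processing and signal its completion.
 */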
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

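/*
 * Verify that the hardware is a supported v5 crypto engine and derive
 * the burst size and BAM pipe pair id used for DMA transfers.
 */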
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 *	BAM dma pipes(rx, tx)		CE pipe pair id
	 *		0,1				0
	 *		2,3				1
	 *		4,5				2
	 *		6,7				3
	 *		...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

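/*
 * Map the register space, enable the core/iface/bus clocks, request the
 * BAM DMA channels and register the supported algorithms.
 */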
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

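/* Stop the done tasklet, then release resources in reverse order of probe. */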
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");